import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import torch

base_model = "Qwen/Qwen2.5-7B"  # base model
adapter_model = "muhtasham/tajik-qwen2.5-7b_finetuned"  # Tajik fine-tune

tokenizer = AutoTokenizer.from_pretrained(base_model)

# Load the base model
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    torch_dtype="auto",
    low_cpu_mem_usage=True,
    device_map="cpu",
)

# Attach the LoRA adapter
model = PeftModel.from_pretrained(model, adapter_model)

def chat(prompt):
    try:
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        outputs = model.generate(
            **inputs,
            max_new_tokens=150,
            do_sample=True,  # required; temperature/top_p are ignored under greedy decoding
            temperature=0.8,
            top_p=0.9,
        )
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        return f"⚠️ Error: {str(e)}"

demo = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(lines=3, label="Enter a prompt (in Tajik, Russian, or English)"),
    outputs="text",
    title="Tajik-Qwen2.5-7B Demo (LoRA)",
    description="Qwen2.5-7B model fine-tuned on the Tajik language.",
)

if __name__ == "__main__":
    demo.launch()
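
# Note (a sketch, not part of the original demo): if CPU latency matters, the LoRA
# adapter can be folded into the base weights with PEFT's merge_and_unload(), which
# removes the per-layer adapter overhead at inference time. The output directory
# name below is a hypothetical example.
#
# merged = model.merge_and_unload()                  # plain CausalLM with merged weights
# merged.save_pretrained("tajik-qwen2.5-7b-merged")  # hypothetical save path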