from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer
from peft import get_peft_model, LoraConfig, TaskType

# Load base model and tokenizer. from_pretrained() expects a model directory
# (with config.json and weight files) or a Hub repo id, not a bare .safetensors file.
model_dir = "/app/models"
tokenizer = AutoTokenizer.from_pretrained("TheBloke/Pygmalion-7B-GPTQ")
model = AutoModelForCausalLM.from_pretrained(model_dir)

# LLaMA-style tokenizers ship without a pad token; reuse EOS for padding.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# LoRA config: train small low-rank adapter matrices instead of the full model.
peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, r=16, lora_alpha=32, lora_dropout=0.05)
model = get_peft_model(model, peft_config)

# Training data (to make the AI respond in Roman Urdu with a romantic tone).
dataset = [
    {"input": "Mujhe ek romantic baat batao", "output": "Jaan, tum meri duniya ho ❤️"},
    {"input": "Mujhse pyaar karti ho?", "output": "Mere Shohar, meri duniya sirf tumse hai ❤️"},
    {"input": "Mujhe kiss do", "output": "Ummmmmmaaaaahhhaaa 😘😘😘"},
]

# Trainer cannot train on raw strings; tokenize each prompt/response pair and
# reuse the input ids as labels for the causal-LM loss.
def tokenize(example):
    text = f"{example['input']}\n{example['output']}{tokenizer.eos_token}"
    tokens = tokenizer(text, truncation=True, max_length=128, padding="max_length")
    tokens["labels"] = tokens["input_ids"].copy()
    return tokens

train_dataset = [tokenize(example) for example in dataset]

# Training
training_args = TrainingArguments(output_dir="/app/models/fine-tuned", per_device_train_batch_size=1, num_train_epochs=3)
trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)
trainer.train()

# Save the fine-tuned LoRA adapter (only the adapter weights, not the full model).
model.save_pretrained("/app/models/fine-tuned")
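
# A minimal inference sketch (an assumption, not part of the original script):
# reload the saved LoRA adapter on top of the base model with PeftModel.from_pretrained
# and generate a reply. Paths, prompt, and sampling settings are illustrative and
# simply mirror the training code above.
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained(model_dir)
chat_model = PeftModel.from_pretrained(base_model, "/app/models/fine-tuned")
chat_model.eval()

prompt = "Mujhe ek romantic baat batao\n"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = chat_model.generate(**inputs, max_new_tokens=50, do_sample=True, top_p=0.9)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))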