from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the Arabic GPT-2 model
model_name = "aubmindlab/aragpt2-medium"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Function to generate chatbot responses
def generate_response(context, question):
    # Build the prompt: context, then "سؤال:" (Question:) and "إجابة:" (Answer:)
    input_text = context + "\nسؤال: " + question + "\nإجابة: "
    inputs = tokenizer(input_text, return_tensors="pt")
    # Pass the attention mask and a pad token id to avoid generation warnings
    output = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask,
                            max_length=150, num_return_sequences=1,
                            pad_token_id=tokenizer.eos_token_id)
    return tokenizer.decode(output[0], skip_special_tokens=True)
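
# Example usage: a minimal sketch showing how generate_response might be called.
# The context and question strings below are illustrative Arabic examples, not
# part of the original script.
if __name__ == "__main__":
    context = "الذكاء الاصطناعي هو فرع من علوم الحاسوب."  # "AI is a branch of computer science."
    question = "ما هو الذكاء الاصطناعي؟"  # "What is artificial intelligence?"
    print(generate_response(context, question))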