# aicypress/app.py
import gradio as gr
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments, DataCollatorForSeq2Seq
# Load the dataset
dataset = load_dataset("json", data_files="dataset.jsonl")
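# Note: dataset.jsonl is assumed to hold one JSON object per line with "input" and
# "output" fields (the keys consumed by tokenize_function below), e.g. (hypothetical):
#   {"input": "Test that the login button navigates to /dashboard",
#    "output": "cy.get('#login').click(); cy.url().should('include', '/dashboard');"}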
# Load the pre-trained model and tokenizer
model_name = "Salesforce/codegen-2B-multi"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
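# CodeGen's GPT-style tokenizer ships without a pad token, so EOS is reused for padding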
tokenizer.pad_token = tokenizer.eos_token
# Tokenize the dataset
def tokenize_function(examples):
    tokenized = tokenizer(
        examples["input"],
        text_target=examples["output"],  # text_target populates the "labels" field
        truncation=True,       # Truncate sequences longer than max_length
        max_length=512,        # Adjust max length if needed
        padding="max_length",  # Fixed-length padding keeps inputs and labels the same shape
    )
    # Replace pad token ids in the labels with -100 so padded positions are ignored
    # by the loss (pad == EOS here, so any literal EOS in the output is masked too)
    tokenized["labels"] = [
        [(tok if tok != tokenizer.pad_token_id else -100) for tok in label]
        for label in tokenized["labels"]
    ]
    return tokenized
tokenized_dataset = dataset.map(tokenize_function, batched=True)
# Sanity-check tokenized lengths (all should be 512 after max_length padding)
for i, example in enumerate(tokenized_dataset["train"]):
    input_len = len(example["input_ids"])
    output_len = len(example["labels"])
    print(f"Example {i}: Input length = {input_len}, Output length = {output_len}")
# Define training arguments
training_args = TrainingArguments(
    output_dir="./results",
    per_device_train_batch_size=1,  # Smaller batch size
    gradient_accumulation_steps=8,  # Accumulate gradients to simulate a larger batch size
    num_train_epochs=3,
    logging_dir="./logs",
    logging_strategy="steps",
    save_strategy="epoch",
    eval_strategy="epoch",
    learning_rate=5e-5,
    overwrite_output_dir=True,
)
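# Effective batch size = per_device_train_batch_size * gradient_accumulation_steps = 1 * 8 = 8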
data_collator = DataCollatorForSeq2Seq(
    tokenizer,
    model=model,
    padding=True,  # Enable dynamic padding
    return_tensors="pt",
)
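# Because tokenize_function already pads everything to max_length, this collator is
# mostly a pass-through here; it is kept since it pads labels with -100 (ignored by
# the loss) should the fixed-length padding above ever be removed.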
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset["train"],
    eval_dataset=tokenized_dataset["train"],  # No held-out split in dataset.jsonl, so eval reuses the train split
    data_collator=data_collator,  # Use dynamic padding
)
# Train the model
trainer.train()
# Save the fine-tuned model
trainer.save_model("./fine_tuned_model")
tokenizer.save_pretrained("./fine_tuned_model")
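# ./fine_tuned_model now holds both the model weights/config and the tokenizer files,
# so the directory can be reloaded directly below.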
# Load the fine-tuned model for inference
fine_tuned_model = AutoModelForCausalLM.from_pretrained("./fine_tuned_model")
fine_tuned_tokenizer = AutoTokenizer.from_pretrained("./fine_tuned_model")
# Define a Gradio interface for testing the model
def generate_cypress_code(prompt):
    inputs = fine_tuned_tokenizer(prompt, return_tensors="pt")
    outputs = fine_tuned_model.generate(
        **inputs, max_length=150, num_return_sequences=1,
        pad_token_id=fine_tuned_tokenizer.eos_token_id,  # Silence the missing-pad-token warning
    )
    return fine_tuned_tokenizer.decode(outputs[0], skip_special_tokens=True)
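# Quick local sanity check (hypothetical prompt) before wiring up the UI:
#   print(generate_cypress_code("Test that clicking #login navigates to /dashboard"))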
# Launch the Gradio interface
interface = gr.Interface(
    fn=generate_cypress_code,
    inputs="text",
    outputs="text",
    title="Cypress Test Generator",
    description="Enter a description of the test you want to generate Cypress code for.",
)
interface.launch()