---
language:
  - ru
---

erida-luminia is a small distilled Russian-language model. The snippet below loads it with a llama3-style RoPE-scaling override and runs a simple console chat loop:

```python
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

model_name = "BestPechenka/erida-luminia"

# Override the RoPE scaling configuration (llama3-style frequency scaling).
custom_config = {
    "rope_scaling": {
        "type": "llama3",
        "factor": 8.0,
        "high_freq_factor": 4.0,
        "low_freq_factor": 1.0,
        "original_max_position_embeddings": 8192,
        "rope_type": "llama3"
    }
}

# Load the base config and apply the override before loading the weights.
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
config.update(custom_config)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    config=config,
    trust_remote_code=True
)

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
```
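If memory is tight, the model can instead be loaded in half precision with automatic device placement. This is a minimal sketch, assuming a CUDA-capable GPU and the `accelerate` package are available; it is not part of the original snippet:

```python
import torch
from transformers import AutoModelForCausalLM

# Assumption: `model_name` and `config` are defined as in the snippet above.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    config=config,
    torch_dtype=torch.float16,  # halve the memory footprint
    device_map="auto",          # let accelerate place layers across devices
    trust_remote_code=True,
)
```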

The chat loop keeps a running dialogue history and renders it with a Saiga-style prompt. The template and role labels are in Russian and are part of the prompt format, so they should stay as-is ("Напишите подходящий ответ!" means "Write a suitable reply!", "Пользователь" is "User", and "Ассистент" is "Assistant"):

```python
# Saiga-style prompt: dialogue history followed by an assistant cue to complete.
saiga_prompt = """Напишите подходящий ответ!

{dialog}

Ассистент: """
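# With the seeded greeting plus one user turn, the rendered prompt looks
# roughly like this (spacing approximate):
#
#   Напишите подходящий ответ!
#
#   Ассистент: Привет! Меня зовут erida-luminia.
#
#   Пользователь: <user input>
#
#   Ассистент: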

# Dialogue history, seeded once with the assistant's greeting.
dialog = [{"role": "bot", "content": "Привет! Меня зовут erida-luminia."}]


def format_dialog(dialog):
    # Render the history in the "Пользователь:"/"Ассистент:" format
    # that the prompt template expects.
    formatted_dialog = ""
    for turn in dialog:
        if turn["role"] == "user":
            formatted_dialog += f"Пользователь: {turn['content']}\n\n"
        elif turn["role"] == "bot":
            formatted_dialog += f"Ассистент: {turn['content']}\n\n"
    return formatted_dialog


def generate_text():
    # Build the prompt from the full dialogue history (the latest user
    # turn is already in `dialog`, so it must not be appended again here).
    prompt = saiga_prompt.format(dialog=format_dialog(dialog))
    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=64)
    # Decode only the newly generated tokens, not the echoed prompt.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)


# Minimal console chat loop.
while True:
    user_input = input(">>> ")
    dialog.append({"role": "user", "content": user_input})

    generated_text = generate_text()
    print(generated_text)

    dialog.append({"role": "bot", "content": generated_text})
```