Qwen2-KRX-LoRA
This model is a version of Qwen2.5-7B-Instruct fine-tuned on a financial (KRX) dataset.
Usage
# Usage: load the base checkpoint, then attach the fine-tuned LoRA adapter.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel, PeftConfig

# Hugging Face Hub identifiers for the base model and the LoRA adapter.
base_model_id = "Qwen/Qwen2.5-7B-Instruct"
peft_model_id = "seong67360/Qwen2.5-7B-Instruct_v1"

# Fetch the tokenizer and the base model weights from the Hub.
tokenizer = AutoTokenizer.from_pretrained(base_model_id)
base_model = AutoModelForCausalLM.from_pretrained(base_model_id)

# Wrap the base model with the LoRA adapter weights.
model = PeftModel.from_pretrained(base_model, peft_model_id)