docs / huggingface_huggingface-llama-recipes.txt
# File: huggingface-llama-recipes-main/assisted_decoding.py
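# Assisted (speculative) decoding: a smaller Llama 3.1 8B model drafts tokens that the
# large 405B target model verifies, which can speed up generation without changing outputs.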
from transformers import AutoModelForCausalLM, AutoTokenizer
import time
import torch
WARMUP = 2
MAX_NEW_TOKENS = 10
DO_SAMPLE = True
ATOL = 1e-06
TORCH_DTYPE = torch.float32
PROMPT = 'Alice and Bob '
CHECKPOINT = 'meta-llama/Meta-Llama-3.1-405B'
ASSISTED_CHECKPOINT = 'meta-llama/Meta-Llama-3.1-8B'
model = AutoModelForCausalLM.from_pretrained(CHECKPOINT, device_map='auto', torch_dtype=TORCH_DTYPE)
assistant_model = AutoModelForCausalLM.from_pretrained(ASSISTED_CHECKPOINT, device_map='auto', torch_dtype=TORCH_DTYPE)
tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
inputs = tokenizer(PROMPT, return_tensors='pt').to(model.device)
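# Warm-up runs so the timed generation below excludes one-time setup costs.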
for _ in range(WARMUP):
    model.generate(**inputs, assistant_model=assistant_model)
start = time.time()
assisted_outputs = model.generate(**inputs, assistant_model=assistant_model)
end = time.time()
assisted_gen_text = tokenizer.batch_decode(assisted_outputs, skip_special_tokens=True)
print(assisted_gen_text)
print(f'\nAssisted time taken: {end - start:.2f}s')
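# For comparison, the same timing can be applied to plain generation without the assistant model:
start = time.time()
baseline_outputs = model.generate(**inputs)
end = time.time()
print(tokenizer.batch_decode(baseline_outputs, skip_special_tokens=True))
print(f'Baseline time taken: {end - start:.2f}s')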
# File: huggingface-llama-recipes-main/awq_generation.py
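# Load a pre-quantized AWQ INT4 checkpoint; the AwqConfig passed here turns on module
# fusing (do_fuse) for faster generation.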
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, AwqConfig
model_id = 'hugging-quants/Meta-Llama-3.1-405B-Instruct-AWQ-INT4'
quantization_config = AwqConfig(bits=4, fuse_max_seq_len=512, do_fuse=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map='auto', quantization_config=quantization_config)
messages = [{'role': 'system', 'content': 'You are a pirate'}, {'role': 'user', 'content': "What's Deep Learning?"}]
inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors='pt', return_dict=True).to('cuda')
outputs = model.generate(**inputs, do_sample=True, max_new_tokens=256)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
# File: huggingface-llama-recipes-main/gptq_generation.py
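# Load a pre-quantized GPTQ INT4 checkpoint; its quantization settings are read from the
# model repo, so no explicit quantization config is needed.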
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_id = 'hugging-quants/Meta-Llama-3.1-405B-Instruct-GPTQ-INT4'
messages = [{'role': 'system', 'content': 'You are a pirate'}, {'role': 'user', 'content': "What's Deep Learning?"}]
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map='auto')
inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors='pt', return_dict=True).to('cuda')
outputs = model.generate(**inputs, do_sample=True, max_new_tokens=256)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
# File: huggingface-llama-recipes-main/peft_finetuning.py
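# Supervised fine-tuning of Llama 3.1 8B with TRL's SFTTrainer, using a LoRA adapter
# (or QLoRA with 4-bit quantization when the QLoRA flag is set).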
import torch
from datasets import load_dataset
from trl import SFTTrainer
from peft import LoraConfig
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TrainingArguments
model_id = 'meta-llama/Meta-Llama-3.1-8B'
tokenizer = AutoTokenizer.from_pretrained(model_id)
dataset = load_dataset('imdb', split='train')
training_args = TrainingArguments(output_dir='./results', num_train_epochs=3, per_device_train_batch_size=4, logging_dir='./logs', logging_steps=10)
QLoRA = True
if QLoRA:
    quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_quant_type='nf4')
    lora_config = LoraConfig(r=8, target_modules='all-linear', bias='none', task_type='CAUSAL_LM')
else:
    quantization_config = None
    lora_config = None
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config, device_map='auto')
trainer = SFTTrainer(model=model, tokenizer=tokenizer, args=training_args, peft_config=lora_config, train_dataset=dataset, dataset_text_field='text')
trainer.train()
# File: huggingface-llama-recipes-main/prompt_reuse.py
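# Prompt reuse: pre-compute the KV cache for a shared system prompt once, then reuse a
# copy of that cache for each new request.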
import os, torch, copy
from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache
device = 'cuda'
ckpt = 'meta-llama/Meta-Llama-3.1-8B-Instruct'
INITIAL_PROMPT = 'From now on, you are going to answer all my questions with historical details. Make sure to always add a bit of french here and there, for style.'
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16)
model.to(device)
tokenizer = AutoTokenizer.from_pretrained(ckpt)
prompt_cache = DynamicCache()
inputs = tokenizer(INITIAL_PROMPT, return_tensors='pt').to('cuda')
prompt_cache = model(**inputs, past_key_values=prompt_cache).past_key_values
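# For each question, prepend the shared prompt and pass a deep copy of the cached
# key/values so the original cache is not modified in place.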
prompt = 'Why are french people obsessed with french?'
new_inputs = tokenizer(INITIAL_PROMPT + prompt, return_tensors='pt').to('cuda')
past_key_values = copy.deepcopy(prompt_cache)
outputs = model.generate(**new_inputs, past_key_values=past_key_values, max_new_tokens=20)
response = tokenizer.batch_decode(outputs)[0]
print(response)
prompt = 'What is the best city to swim in?'
new_inputs = tokenizer(INITIAL_PROMPT + prompt, return_tensors='pt').to('cuda')
outputs = model.generate(**new_inputs, past_key_values=copy.deepcopy(prompt_cache), max_new_tokens=20)
response = tokenizer.batch_decode(outputs)[0]
print(response)
# File: huggingface-llama-recipes-main/quantized_cache.py
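# Quantized KV cache: store past keys/values in low precision to reduce memory use
# during long generations.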
import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
device = 'cuda'
ckpt = 'meta-llama/Meta-Llama-3.1-8B-Instruct'
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16)
model.to(device)
tokenizer = AutoTokenizer.from_pretrained(ckpt)
prompt = 'Explain the three body problem'
inputs = tokenizer(prompt, return_tensors='pt').to('cuda')
outputs = model.generate(**inputs, cache_implementation='quantized', do_sample=True, max_new_tokens=256)
response = tokenizer.batch_decode(outputs)[0]
print(response)
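# The cache quantization can also be configured explicitly, e.g. with the HQQ backend: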
from transformers import QuantizedCacheConfig
cache_config = QuantizedCacheConfig(backend='HQQ', nbits=4, axis_key=0, axis_value=1, compute_dtype=torch.float16, device=model.device)
out = model.generate(**inputs, do_sample=False, max_new_tokens=30, cache_implementation='quantized', cache_config=cache_config)
print(tokenizer.batch_decode(out, skip_special_tokens=True))
# File: huggingface-llama-recipes-main/torch_compile.py
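# torch.compile with a static KV cache: the first compiled generations are slow while
# the graph is captured and warmed up; subsequent calls are much faster.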
import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
device = 'cuda'
ckpt = 'meta-llama/Meta-Llama-3.1-8B-Instruct'
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16)
model.to(device)
tokenizer = AutoTokenizer.from_pretrained(ckpt)
prompt = 'Why dogs are so cute?'
inputs = tokenizer(prompt, return_tensors='pt').to(device)
model.generation_config.max_length = 128
outputs = model.generate(**inputs, do_sample=False)
response = tokenizer.batch_decode(outputs)[0]
print(response)
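# Compile the forward pass and switch to a static cache; the repeated generate() calls
# below serve as warm-up for the compiled model before the final timed-quality run.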
model.forward = torch.compile(model.forward, mode='reduce-overhead', fullgraph=True)
model.generation_config.cache_implementation = 'static'
outputs = model.generate(**inputs, do_sample=False)
response = tokenizer.batch_decode(outputs)[0]
outputs = model.generate(**inputs, do_sample=False)
response = tokenizer.batch_decode(outputs)[0]
outputs = model.generate(**inputs, do_sample=False)
response = tokenizer.batch_decode(outputs)[0]
print(response)