This model is part of ModelCloud's Vortex collection: optimized and validated quants that pass strict quality assurance on multiple benchmarks.
This model was quantized and exported to MLX using GPTQModel.
```shell
# install mlx
pip install mlx_lm
```
```python
from mlx_lm import load, generate

# load the mlx-exported quantized model from the Hugging Face Hub
mlx_path = "ModelCloud/Llama-3.2-1B-Instruct-gptqmodel-4bit-vortex-mlx-v2.5"
mlx_model, tokenizer = load(mlx_path)

# apply the model's chat template to the prompt before generating
prompt = "The capital of France is"
messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
```
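If you prefer tokens to appear as they are produced rather than waiting for the full completion, `mlx_lm` also ships a `stream_generate` helper. Below is a minimal sketch, assuming a recent `mlx_lm` release where `stream_generate` yields response chunks exposing the decoded text via a `.text` field (the exact chunk type varies across versions); the prompt and `max_tokens` value are illustrative:

```python
from mlx_lm import load, stream_generate

mlx_path = "ModelCloud/Llama-3.2-1B-Instruct-gptqmodel-4bit-vortex-mlx-v2.5"
mlx_model, tokenizer = load(mlx_path)

# print each decoded chunk as soon as it is generated
for chunk in stream_generate(mlx_model, tokenizer, "The capital of France is", max_tokens=64):
    print(chunk.text, end="", flush=True)
print()
```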
To produce the MLX export yourself, start from the GPTQ source model and install GPTQModel with its MLX extra:

```shell
# install gptqmodel with mlx support
pip install "gptqmodel[mlx]" --no-build-isolation
```
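Note that `--no-build-isolation` makes pip build `gptqmodel` against the packages already installed in your environment rather than in an isolated build environment; in practice this assumes `torch` is already present, so install it first if it is not.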
```python
from gptqmodel import GPTQModel

# load the gptq quantized model and choose a local output path
gptq_model_path = "ModelCloud/Llama-3.2-1B-Instruct-gptqmodel-4bit-vortex-v2.5"
mlx_path = "./vortex/Llama-3.2-1B-Instruct-gptqmodel-4bit-vortex-mlx-v2.5"

# export to mlx model
GPTQModel.export(gptq_model_path, mlx_path, "mlx")
```
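Once the export finishes, `mlx_path` is an ordinary local MLX model directory, so it can be loaded back with `mlx_lm` to sanity-check the conversion. A minimal sketch (the prompt and token budget are illustrative):

```python
from mlx_lm import load, generate

# load the freshly exported local mlx model and run a quick smoke test
mlx_model, tokenizer = load("./vortex/Llama-3.2-1B-Instruct-gptqmodel-4bit-vortex-mlx-v2.5")
print(generate(mlx_model, tokenizer, prompt="The capital of France is", max_tokens=32))
```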