# vllm-inference / runner.sh
# Author: yusufs — feat(runner.sh): using MODEL_ID only (commit 490e6a3)
# NOTE(review): the lines above/below were scraped Hugging Face page chrome
# ("raw / history / blame / 952 Bytes"); commented out so they are inert.
# The shebang should ultimately be the first line of the file — TODO confirm.
#!/bin/bash
# Launch the vLLM OpenAI-compatible API server for a model chosen by MODEL_ID.
#
# Required env:
#   MODEL_ID  "1" -> meta-llama/Llama-3.2-3B-Instruct
#             "2" -> sail/Sailor-4B-Chat
#
# Exits 1 with a diagnostic on stderr if MODEL_ID is unset or invalid.
set -euo pipefail

# Validate MODEL_ID (use ${MODEL_ID:-} so an unset var doesn't trip `set -u`).
if [[ -z "${MODEL_ID:-}" ]]; then
  echo "Error: MODEL_ID is not set." >&2
  exit 1
fi

# Map MODEL_ID to a model name plus a pinned revision hash, so deployments are
# reproducible even if the upstream repo's main branch moves.
case "$MODEL_ID" in
  1)
    MODEL_NAME="meta-llama/Llama-3.2-3B-Instruct"
    MODEL_REV="0cb88a4f764b7a12671c53f0838cd831a0843b95"
    ;;
  2)
    MODEL_NAME="sail/Sailor-4B-Chat"
    MODEL_REV="89a866a7041e6ec023dd462adeca8e28dd53c83e"
    ;;
  *)
    echo "Error: Invalid MODEL_ID. Valid values are 1 or 2." >&2
    exit 1
    ;;
esac

# Quote "$MODEL_NAME": unquoted expansion would word-split/glob (SC2086).
printf "Running %s using vLLM OpenAI compatible API Server at port %s\n" "$MODEL_NAME" "7860"

# Run the API server with the resolved model/revision. Port 7860 matches the
# banner above; the remaining flags are fixed serving parameters.
python -u /app/openai_compatible_api_server.py \
  --model "${MODEL_NAME}" \
  --revision "${MODEL_REV}" \
  --seed 42 \
  --host 0.0.0.0 \
  --port 7860 \
  --max-num-batched-tokens 32768 \
  --max-model-len 32768 \
  --dtype float16 \
  --enforce-eager \
  --gpu-memory-utilization 0.9