#!/bin/sh


printf "Running meta-llama/Llama-3.2-3B-Instruct using vLLM OpenAI compatible API Server at port %s\n" "7860"

# Llama-3.2-3B-Instruct supports a max context length of 131072 tokens, but we reduce it to 32k.
# 32k tokens is roughly 24k words (about 3/4 of the token count), and at an average of
# 500 (0.5k) words per page that is 24k / 0.5k = ~48 pages, which is plenty here.
# Running at the full context length would be slower, and the memory is not enough on a T4.
# https://github.com/vllm-project/vllm/blob/v0.6.4/vllm/config.py#L85-L86
# https://github.com/vllm-project/vllm/blob/v0.6.4/vllm/config.py#L98-L102
# [rank0]:     raise ValueError(
# [rank0]: ValueError: The model's max seq len (131072)
#   is larger than the maximum number of tokens that can be stored in KV cache (57056).
#   Try increasing `gpu_memory_utilization` or decreasing `max_model_len` when initializing the engine.
#
# Actually, meta-llama/Llama-3.2-3B-Instruct rev 0cb88a4f764b7a12671c53f0838cd831a0843b95
# fits on a T4 16GB, but for the sake of performance and to compare with the same
# params as sail/Sailor-1.8B-Chat, I use
# meta-llama/Llama-3.2-1B-Instruct rev 9213176726f574b556790deb65791e0c5aa438b6.
python -u /app/openai_compatible_api_server.py \
    --model meta-llama/Llama-3.2-3B-Instruct \
    --revision 0cb88a4f764b7a12671c53f0838cd831a0843b95 \
    --seed 42 \
    --host 0.0.0.0 \
    --port 7860 \
    --max-num-batched-tokens 32768 \
    --max-model-len 32768 \
    --dtype float16 \
    --gpu-memory-utilization 0.85
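
# A minimal smoke test for the server above, to run from another shell once it is up.
# This assumes /app/openai_compatible_api_server.py exposes the standard vLLM
# OpenAI-compatible routes (/v1/models, /v1/chat/completions); adjust if it differs.
#
# curl http://localhost:7860/v1/models
#
# curl http://localhost:7860/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{
#           "model": "meta-llama/Llama-3.2-3B-Instruct",
#           "messages": [{"role": "user", "content": "Hello!"}],
#           "max_tokens": 64
#         }'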