---
hub:
  repo_id: winglian/manticore-13b-chat-pyg-ggml
  filename: manticore-13b-chat-pyg-q5_1.ggml.bin
  use_auth_token: true
llama_cpp:
  n_ctx: 2048
  n_gpu_layers: 40 # llama 13b has 40 layers
chat:
  stop:
    - "</s>"
    - "<unk>"
    - "### USER:"
    - "USER:"
queue:
  max_size: 16
  concurrency_count: 1 # leave this at 1; llama-cpp-python doesn't handle concurrent requests and will crash the entire app
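
A minimal sketch of how an app might consume this config, assuming it is saved as `config.yml` and loaded with `yaml`, `huggingface_hub`, and `llama-cpp-python`. The loader below is illustrative only, not the actual application code:

```python
import yaml
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Read the YAML config shown above (path is an assumption).
with open("config.yml") as f:
    config = yaml.safe_load(f)

# Download the quantized GGML weights referenced under `hub:`.
model_path = hf_hub_download(
    repo_id=config["hub"]["repo_id"],
    filename=config["hub"]["filename"],
)

# Initialise llama-cpp-python with the options under `llama_cpp:`
# (n_ctx=2048, n_gpu_layers=40 in this config).
llm = Llama(model_path=model_path, **config["llama_cpp"])

# Generate a completion, stopping at any of the configured stop strings.
output = llm(
    "### USER: Hello, who are you?\n### ASSISTANT:",
    stop=config["chat"]["stop"],
    max_tokens=256,
)
print(output["choices"][0]["text"])
```

The `queue:` settings are not used by `llama_cpp` itself; they would typically be passed to the serving layer (for example a Gradio queue), which is why `concurrency_count` must stay at 1 while `llama-cpp-python` cannot serve concurrent requests.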