# infini-gram / constants.py
import os
# Options: available indexes, mapping display description -> internal index name
INDEX_BY_DESC = {
'Dolma-v1.6 (3.1T tokens)': 'v4_dolma-v1_6_llama',
'RedPajama (1.4T tokens)': 'v4_rpj_llama_s4',
'Pile-train (380B tokens)': 'v4_piletrain_llama',
'C4-train (200B tokens)': 'v4_c4train_llama',
'Pile-val (390M tokens)': 'v4_pileval_llama',
# 'Pile-val (GPT-2 tokenizer), 380M tokens': 'v4_pileval_gpt2',
# 'Dolma-v1.6-sample (OLMo tokenizer), 8.0B tokens': 'v4_dolmasample_olmo',
# 'Dolma-v1.6-sample (9.2B tokens)': 'v4_dolma-v1_6-sample_llama',
# 'Dolma-v1.6-wiki (4.3B tokens)': 'v4_dolma-v1_6-wiki_llama',
# 'Dolma-v1.6-books (5.8B tokens)': 'v4_dolma-v1_6-books_llama',
# 'Dolma-v1.6-pes2o (69B tokens)': 'v4_dolma-v1_6-pes2o_llama',
# 'Dolma-v1.6-reddit (89B tokens)': 'v4_dolma-v1_6-reddit_llama',
# 'Dolma-v1.6-c4 (200B tokens)': 'v4_dolma-v1_6-c4_llama',
# 'Dolma-v1.6-stack (420B tokens)': 'v4_dolma-v1_6-stack_llama',
# 'Dolma-v1.6-cc_en_head (660B tokens)': 'v4_dolma-v1_6-cc_en_head_llama',
# 'Dolma-v1.6-cc_en_middle (650B tokens)': 'v4_dolma-v1_6-cc_en_middle_llama',
# 'Dolma-v1.6-cc_en_tail (970B tokens)': 'v4_dolma-v1_6-cc_en_tail_llama',
}
INDEX_DESCS = list(INDEX_BY_DESC.keys())
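# A minimal usage sketch (an assumption, not part of the original file): the
# demo front end would show INDEX_DESCS as the user-facing choices and resolve
# the selection back to an index name via INDEX_BY_DESC. resolve_index is a
# hypothetical helper name.
def resolve_index(desc):
    try:
        return INDEX_BY_DESC[desc]
    except KeyError:
        raise ValueError(f'Unknown index description: {desc!r}')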
# API limits (each can be overridden via an environment variable of the same name)
MAX_QUERY_CHARS = int(os.environ.get('MAX_QUERY_CHARS', 1000))
MAX_CLAUSES_PER_CNF = int(os.environ.get('MAX_CLAUSES_PER_CNF', 4))
MAX_TERMS_PER_CLAUSE = int(os.environ.get('MAX_TERMS_PER_CLAUSE', 4))
MAX_SUPPORT = int(os.environ.get('MAX_SUPPORT', 1000))
MAX_CLAUSE_FREQ = int(os.environ.get('MAX_CLAUSE_FREQ', 50000))
MAX_DIFF_TOKENS = int(os.environ.get('MAX_DIFF_TOKENS', 100))
MAXNUM = int(os.environ.get('MAXNUM', 10))
MAX_DISP_LEN = int(os.environ.get('MAX_DISP_LEN', 5000))
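# A minimal validation sketch for the limits above (assumptions: a query
# arrives as a raw string, and its CNF form is a list of clauses, each a list
# of terms; validate_cnf_query is a hypothetical helper, not part of the
# original file).
def validate_cnf_query(query, cnf):
    if len(query) > MAX_QUERY_CHARS:
        raise ValueError(f'Query must be at most {MAX_QUERY_CHARS} characters')
    if len(cnf) > MAX_CLAUSES_PER_CNF:
        raise ValueError(f'Query must have at most {MAX_CLAUSES_PER_CNF} clauses')
    for clause in cnf:
        if len(clause) > MAX_TERMS_PER_CLAUSE:
            raise ValueError(f'Each clause must have at most {MAX_TERMS_PER_CLAUSE} terms')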
# HF demo
API_URL = os.environ.get('API_URL', None)
DEFAULT_CONCURRENCY_LIMIT = int(os.environ.get('DEFAULT_CONCURRENCY_LIMIT', 10))
MAX_SIZE = int(os.environ.get('MAX_SIZE', 100))
MAX_THREADS = int(os.environ.get('MAX_THREADS', 40))
# Debug mode is enabled by any value other than the literal string 'False'
# (e.g. DEBUG=1, DEBUG=true, and even DEBUG=false all enable it)
DEBUG = (os.environ.get('DEBUG', 'False') != 'False')
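# Usage note (an assumption, not part of the original file): every constant
# above can be overridden through an environment variable of the same name,
# e.g. when launching the demo (assuming the entry point is app.py):
#   MAX_QUERY_CHARS=2000 DEBUG=True python app.py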