#================================================================================================
# Config of the LLMs
#================================================================================================
conv_model : "gpt-4o-mini" # the conversation model
programmer_model : "gpt-4o-mini"
inspector_model : "gpt-4o-mini"
api_key : ""
base_url_conv_model : 'https://api.openai.com/v1'
base_url_programmer : 'https://api.openai.com/v1'
base_url_inspector : 'htts://api.openai.com/v1'
max_token_conv_model: 4096 # the max token of the conversation model, this will determine the maximum length of the report.
# conv_model: "gemini-1.5-flash" # the conversation model for high-quality responses
# programmer_model: "gemini-1.5-flash" # model for code-related tasks
# inspector_model: "gemini-1.5-flash" # model for error-checking and debugging
# api_key: "AIzaSyA_BrRio97V_i7-sQ41EzhNqNqyPE5Ny9E" # replace with your API key
# base_url_conv_model: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash'
# base_url_programmer: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash'
# base_url_inspector: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash'
# max_token_conv_model: 4096 # the maximum token limit for generating comprehensive reports
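#
# Illustrative sketch (not from the original project docs): since the base_url_* keys above take an
# OpenAI-style endpoint, the same keys can in principle point at any OpenAI-compatible server.
# The model name, key, and URL below are placeholder assumptions, not required values.
# conv_model: "my-local-model"
# programmer_model: "my-local-model"
# inspector_model: "my-local-model"
# api_key: "EMPTY"                # many local servers accept any non-empty string
# base_url_conv_model: 'http://localhost:8000/v1'
# base_url_programmer: 'http://localhost:8000/v1'
# base_url_inspector: 'http://localhost:8000/v1'
# max_token_conv_model: 4096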
#================================================================================================
# Config of the system
#================================================================================================
streaming: True
# cache-related settings
oss_endpoint: ""
oss_access_key_id: ""
oss_access_secret: ""
oss_bucket_name: ""
expired_time: 36000   # expiration time of links to cached objects
cache_dir: ""         # local cache directory
max_attempts: 5       # maximum number of self-correction attempts
max_exe_time: 18000   # maximum time allowed for execution
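# A minimal usage sketch (assumptions: the keys are read with PyYAML, and the oss_* fields name an
# Alibaba-Cloud-OSS-style cache as the naming suggests; the oss2 calls and object key below are
# illustrative, not this project's actual code):
#
#   import yaml, oss2
#   cfg = yaml.safe_load(open("config.yaml"))
#   auth = oss2.Auth(cfg["oss_access_key_id"], cfg["oss_access_secret"])
#   bucket = oss2.Bucket(auth, cfg["oss_endpoint"], cfg["oss_bucket_name"])
#   # presigned GET link for a cached artifact; oss2 expects the expiry in seconds
#   url = bucket.sign_url("GET", "cache/example_report.html", cfg["expired_time"])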
# knowledge integration
retrieval: False      # enable knowledge retrieval
mode: "full"