tiendung commited on
Commit
1e9512b
·
verified ·
1 Parent(s): 438ecdd

Update llm.py

Browse files
Files changed (1) hide show
  1. llm.py +2 -2
llm.py CHANGED
@@ -2,7 +2,7 @@
2
  import utils; from utils import *
3
  import os, sys, lzma, json, pprint, time, subprocess
4
 
5
- thinker = os.getenv("thinker", "405b")
6
  TEMPERATURE = float(os.getenv("temperature", 0.1)) # 0.0 conservative (good for coding and correct syntax)
7
 
8
  LLM_HOST = "gemini"
@@ -82,7 +82,7 @@ elif thinker in "70b|405b":
82
  # https://docs.together.ai/docs/chat-models#hosted-models
83
  model = {
84
  "405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo 128k", # $3.50 / 1m tokens(*)
85
- "70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo 128k", # $0.88 / 1m tokens(*)
86
  }[thinker]
87
 
88
  model, CTXLEN = model.strip().split()
 
2
  import utils; from utils import *
3
  import os, sys, lzma, json, pprint, time, subprocess
4
 
5
+ thinker = os.getenv("thinker", "70b")
6
  TEMPERATURE = float(os.getenv("temperature", 0.1)) # 0.0 conservative (good for coding and correct syntax)
7
 
8
  LLM_HOST = "gemini"
 
82
  # https://docs.together.ai/docs/chat-models#hosted-models
83
  model = {
84
  "405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo 128k", # $3.50 / 1m tokens(*)
85
+ "70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo 128k", # $0.88 / 1m tokens(*)
86
  }[thinker]
87
 
88
  model, CTXLEN = model.strip().split()