Thomas (Tom) Gardos commited on
Commit
7466d9d
Β·
unverified Β·
2 Parent(s): a8421b2 9c40653

Merge pull request #89 from DL4DS/code_restructure

Browse files
This view is limited to 50 files because it contains too many changes. Β  See raw diff
Files changed (50) hide show
  1. .gitignore +12 -1
  2. Dockerfile +11 -3
  3. Dockerfile.dev +11 -4
  4. README.md +20 -9
  5. {code β†’ apps/ai_tutor}/.chainlit/config.toml +1 -1
  6. apps/ai_tutor/README.md +12 -0
  7. {code β†’ apps/ai_tutor}/app.py +55 -20
  8. code/main.py β†’ apps/ai_tutor/chainlit_app.py +20 -26
  9. {code/modules β†’ apps/ai_tutor}/config/config.yml +5 -5
  10. apps/ai_tutor/config/config_manager.py +188 -0
  11. {code/modules β†’ apps/ai_tutor}/config/constants.py +1 -9
  12. apps/ai_tutor/config/project_config.yml +20 -0
  13. {code/modules β†’ apps/ai_tutor}/config/prompts.py +0 -0
  14. apps/ai_tutor/encrypt_students.py +53 -0
  15. apps/ai_tutor/helpers.py +94 -0
  16. apps/ai_tutor/private/placeholder_students_file.json +5 -0
  17. code/public/avatars/ai_tutor.png β†’ apps/ai_tutor/public/assets/images/avatars/ai-tutor.png +0 -0
  18. code/public/logo_dark.png β†’ apps/ai_tutor/public/assets/images/avatars/ai_tutor.png +0 -0
  19. {code/public β†’ apps/ai_tutor/public/assets/images/starter_icons}/acastusphoton-svgrepo-com.svg +0 -0
  20. {code/public β†’ apps/ai_tutor/public/assets/images/starter_icons}/adv-screen-recorder-svgrepo-com.svg +0 -0
  21. {code/public β†’ apps/ai_tutor/public/assets/images/starter_icons}/alarmy-svgrepo-com.svg +0 -0
  22. {code/public β†’ apps/ai_tutor/public/assets/images/starter_icons}/calendar-samsung-17-svgrepo-com.svg +0 -0
  23. apps/ai_tutor/public/files/students_encrypted.json +1 -0
  24. apps/ai_tutor/public/files/test.css +32 -0
  25. code/public/logo_light.png β†’ apps/ai_tutor/public/logo_dark.png +0 -0
  26. apps/ai_tutor/public/logo_light.png +0 -0
  27. {storage β†’ apps/ai_tutor/storage}/data/urls.txt +0 -0
  28. {code β†’ apps/ai_tutor}/templates/cooldown.html +1 -1
  29. {code β†’ apps/ai_tutor}/templates/dashboard.html +1 -1
  30. {code β†’ apps/ai_tutor}/templates/error.html +0 -0
  31. {code β†’ apps/ai_tutor}/templates/error_404.html +0 -0
  32. {code β†’ apps/ai_tutor}/templates/login.html +1 -1
  33. {code β†’ apps/ai_tutor}/templates/logout.html +0 -0
  34. apps/ai_tutor/templates/unauthorized.html +94 -0
  35. apps/chainlit_base/.chainlit/config.toml +120 -0
  36. apps/chainlit_base/chainlit.md +14 -0
  37. {code β†’ apps/chainlit_base}/chainlit_base.py +30 -94
  38. apps/chainlit_base/config/config.yml +60 -0
  39. apps/chainlit_base/config/config_manager.py +174 -0
  40. {code/modules β†’ apps/chainlit_base}/config/project_config.yml +4 -1
  41. apps/chainlit_base/config/prompts.py +97 -0
  42. apps/chainlit_base/public/assets/images/avatars/ai-tutor.png +0 -0
  43. apps/chainlit_base/public/assets/images/avatars/ai_tutor.png +0 -0
  44. apps/chainlit_base/public/assets/images/starter_icons/acastusphoton-svgrepo-com.svg +2 -0
  45. apps/chainlit_base/public/assets/images/starter_icons/adv-screen-recorder-svgrepo-com.svg +2 -0
  46. apps/chainlit_base/public/assets/images/starter_icons/alarmy-svgrepo-com.svg +2 -0
  47. apps/chainlit_base/public/assets/images/starter_icons/calendar-samsung-17-svgrepo-com.svg +36 -0
  48. apps/chainlit_base/public/files/students_encrypted.json +1 -0
  49. {code/public β†’ apps/chainlit_base/public/files}/test.css +8 -5
  50. apps/chainlit_base/public/logo_dark.png +0 -0
.gitignore CHANGED
@@ -169,5 +169,16 @@ code/.chainlit/translations/
169
  storage/logs/*
170
  vectorstores/*
171
 
172
- */.files/*
173
  code/storage/models/
 
 
 
 
 
 
 
 
 
 
 
 
169
  storage/logs/*
170
  vectorstores/*
171
 
172
+ **/.files/*
173
  code/storage/models/
174
+
175
+ **/translations/en-US.json
176
+ **/translations/zh-CN.json
177
+
178
+
179
+ **/vectorstores/*
180
+
181
+ **/private/students.json
182
+
183
+ **/apps/*/storage/logs/*
184
+ **/apps/*/private/*
Dockerfile CHANGED
@@ -3,13 +3,18 @@ FROM python:3.11
3
  WORKDIR /code
4
 
5
  COPY ./requirements.txt /code/requirements.txt
 
6
 
7
  RUN pip install --upgrade pip
8
 
9
  RUN pip install --no-cache-dir -r /code/requirements.txt
 
10
 
11
  COPY . /code
12
 
 
 
 
13
  # List the contents of the /code directory to verify files are copied correctly
14
  RUN ls -R /code
15
 
@@ -17,12 +22,15 @@ RUN ls -R /code
17
  RUN chmod -R 777 /code
18
 
19
  # Create a logs directory and set permissions
20
- RUN mkdir /code/logs && chmod 777 /code/logs
21
 
22
  # Create a cache directory within the application's working directory
23
  RUN mkdir /.cache && chmod -R 777 /.cache
24
 
25
- WORKDIR /code/code
 
 
 
26
 
27
  RUN --mount=type=secret,id=HUGGINGFACEHUB_API_TOKEN,mode=0444,required=true
28
  RUN --mount=type=secret,id=OPENAI_API_KEY,mode=0444,required=true
@@ -35,4 +43,4 @@ RUN --mount=type=secret,id=LITERAL_API_KEY_LOGGING,mode=0444,required=true
35
  RUN --mount=type=secret,id=CHAINLIT_AUTH_SECRET,mode=0444,required=true
36
 
37
  # Default command to run the application
38
- CMD ["sh", "-c", "python -m modules.vectorstore.store_manager && uvicorn app:app --host 0.0.0.0 --port 7860"]
 
3
  WORKDIR /code
4
 
5
  COPY ./requirements.txt /code/requirements.txt
6
+ COPY ./setup.py /code/setup.py
7
 
8
  RUN pip install --upgrade pip
9
 
10
  RUN pip install --no-cache-dir -r /code/requirements.txt
11
+ RUN pip install -e .
12
 
13
  COPY . /code
14
 
15
+ # Copy .env file to the application directory
16
+ COPY .env /code/apps/ai_tutor/.env
17
+
18
  # List the contents of the /code directory to verify files are copied correctly
19
  RUN ls -R /code
20
 
 
22
  RUN chmod -R 777 /code
23
 
24
  # Create a logs directory and set permissions
25
+ RUN mkdir /code/apps/ai_tutor/logs && chmod 777 /code/apps/ai_tutor/logs
26
 
27
  # Create a cache directory within the application's working directory
28
  RUN mkdir /.cache && chmod -R 777 /.cache
29
 
30
+ WORKDIR /code/apps/ai_tutor
31
+
32
+ # Expose the port the app runs on
33
+ EXPOSE 7860
34
 
35
  RUN --mount=type=secret,id=HUGGINGFACEHUB_API_TOKEN,mode=0444,required=true
36
  RUN --mount=type=secret,id=OPENAI_API_KEY,mode=0444,required=true
 
43
  RUN --mount=type=secret,id=CHAINLIT_AUTH_SECRET,mode=0444,required=true
44
 
45
  # Default command to run the application
46
+ CMD python -m modules.vectorstore.store_manager --config_file config/config.yml --project_config_file config/project_config.yml && python -m uvicorn app:app --host 0.0.0.0 --port 7860
Dockerfile.dev CHANGED
@@ -3,13 +3,18 @@ FROM python:3.11
3
  WORKDIR /code
4
 
5
  COPY ./requirements.txt /code/requirements.txt
 
6
 
7
  RUN pip install --upgrade pip
8
 
9
  RUN pip install --no-cache-dir -r /code/requirements.txt
 
10
 
11
  COPY . /code
12
 
 
 
 
13
  # List the contents of the /code directory to verify files are copied correctly
14
  RUN ls -R /code
15
 
@@ -17,15 +22,17 @@ RUN ls -R /code
17
  RUN chmod -R 777 /code
18
 
19
  # Create a logs directory and set permissions
20
- RUN mkdir /code/logs && chmod 777 /code/logs
21
 
22
  # Create a cache directory within the application's working directory
23
  RUN mkdir /.cache && chmod -R 777 /.cache
24
 
25
- WORKDIR /code/code
 
 
26
 
27
  # Expose the port the app runs on
28
- EXPOSE 8000
29
 
30
  # Default command to run the application
31
- CMD ["sh", "-c", "python -m modules.vectorstore.store_manager && chainlit run main.py --host 0.0.0.0 --port 8000"]
 
3
  WORKDIR /code
4
 
5
  COPY ./requirements.txt /code/requirements.txt
6
+ COPY ./setup.py /code/setup.py
7
 
8
  RUN pip install --upgrade pip
9
 
10
  RUN pip install --no-cache-dir -r /code/requirements.txt
11
+ RUN pip install -e .
12
 
13
  COPY . /code
14
 
15
+ # Copy .env file to the application directory
16
+ COPY .env /code/apps/ai_tutor/.env
17
+
18
  # List the contents of the /code directory to verify files are copied correctly
19
  RUN ls -R /code
20
 
 
22
  RUN chmod -R 777 /code
23
 
24
  # Create a logs directory and set permissions
25
+ RUN mkdir /code/apps/ai_tutor/logs && chmod 777 /code/apps/ai_tutor/logs
26
 
27
  # Create a cache directory within the application's working directory
28
  RUN mkdir /.cache && chmod -R 777 /.cache
29
 
30
+ WORKDIR /code/apps/ai_tutor
31
+
32
+ RUN ls -R /code
33
 
34
  # Expose the port the app runs on
35
+ EXPOSE 7860
36
 
37
  # Default command to run the application
38
+ CMD python -m modules.vectorstore.store_manager --config_file config/config.yml --project_config_file config/project_config.yml && python -m uvicorn app:app --host 0.0.0.0 --port 7860
README.md CHANGED
@@ -9,6 +9,12 @@ app_port: 7860
9
  ---
10
  # DL4DS Tutor πŸƒ
11
 
 
 
 
 
 
 
12
  Check out the configuration reference at [Hugging Face Spaces Config Reference](https://huggingface.co/docs/hub/spaces-config-reference).
13
 
14
  You can find a "production" implementation of the Tutor running live at [DL4DS Tutor](https://dl4ds-dl4ds-tutor.hf.space/) from the
@@ -30,26 +36,31 @@ Please visit [setup](https://dl4ds.github.io/dl4ds_tutor/guide/setup/) for more
30
  git clone https://github.com/DL4DS/dl4ds_tutor
31
  ```
32
 
33
- 2. **Put your data under the `storage/data` directory**
 
 
 
 
 
 
34
  - Add URLs in the `urls.txt` file.
35
- - Add other PDF files in the `storage/data` directory.
36
 
37
  3. **To test Data Loading (Optional)**
38
  ```bash
39
- cd code
40
- python -m modules.dataloader.data_loader --links "your_pdf_link"
41
  ```
42
 
43
  4. **Create the Vector Database**
44
  ```bash
45
- cd code
46
- python -m modules.vectorstore.store_manager
47
  ```
48
- - Note: You need to run the above command when you add new data to the `storage/data` directory, or if the `storage/data/urls.txt` file is updated.
49
 
50
  6. **Run the FastAPI App**
51
  ```bash
52
- cd code
53
  uvicorn app:app --port 7860
54
  ```
55
 
@@ -64,7 +75,7 @@ The HuggingFace Space is built using the `Dockerfile` in the repository. To run
64
 
65
  ```bash
66
  docker build --tag dev -f Dockerfile.dev .
67
- docker run -it --rm -p 8000:8000 dev
68
  ```
69
 
70
  ## Contributing
 
9
  ---
10
  # DL4DS Tutor πŸƒ
11
 
12
+ ![Build Status](https://github.com/DL4DS/dl4ds_tutor/actions/workflows/push_to_hf_space.yml/badge.svg)
13
+ ![License](https://img.shields.io/github/license/DL4DS/dl4ds_tutor)
14
+ ![GitHub stars](https://img.shields.io/github/stars/DL4DS/dl4ds_tutor)
15
+ ![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)
16
+
17
+
18
  Check out the configuration reference at [Hugging Face Spaces Config Reference](https://huggingface.co/docs/hub/spaces-config-reference).
19
 
20
  You can find a "production" implementation of the Tutor running live at [DL4DS Tutor](https://dl4ds-dl4ds-tutor.hf.space/) from the
 
36
  git clone https://github.com/DL4DS/dl4ds_tutor
37
  ```
38
 
39
+ 2. Create your app in the apps folder. (An example is the `apps/ai_tutor` app)
40
+ ```
41
+ cd apps
42
+ mkdir your_app
43
+ ```
44
+
45
+ 2. **Put your data under the `apps/your_app/storage/data` directory**
46
  - Add URLs in the `urls.txt` file.
47
+ - Add other PDF files in the `apps/your_app/storage/data` directory.
48
 
49
  3. **To test Data Loading (Optional)**
50
  ```bash
51
+ cd apps/your_app
52
+ python -m modules.dataloader.data_loader --links "your_pdf_link" --config_file config/config.yml --project_config_file config/project_config.yml
53
  ```
54
 
55
  4. **Create the Vector Database**
56
  ```bash
57
+ cd apps/your_app
58
+ python -m modules.vectorstore.store_manager --config_file config/config.yml --project_config_file config/project_config.yml
59
  ```
 
60
 
61
  6. **Run the FastAPI App**
62
  ```bash
63
+ cd apps/your_app
64
  uvicorn app:app --port 7860
65
  ```
66
 
 
75
 
76
  ```bash
77
  docker build --tag dev -f Dockerfile.dev .
78
+ docker run -it --rm -p 7860:7860 dev
79
  ```
80
 
81
  ## Contributing
{code β†’ apps/ai_tutor}/.chainlit/config.toml RENAMED
@@ -69,7 +69,7 @@ github = "https://github.com/DL4DS/dl4ds_tutor"
69
 
70
  # Specify a CSS file that can be used to customize the user interface.
71
  # The CSS file can be served from the public directory or via an external link.
72
- custom_css = "/public/test.css"
73
 
74
  # Specify a Javascript file that can be used to customize the user interface.
75
  # The Javascript file can be served from the public directory.
 
69
 
70
  # Specify a CSS file that can be used to customize the user interface.
71
  # The CSS file can be served from the public directory or via an external link.
72
+ custom_css = "/public/files/test.css"
73
 
74
  # Specify a Javascript file that can be used to customize the user interface.
75
  # The Javascript file can be served from the public directory.
apps/ai_tutor/README.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # WIP
2
+
3
+
4
+ ## Run the encrypt_students script
5
+
6
+ - If you don't want the emails to be public, run this script to encrypt the emails of the students.
7
+ - This will create a new file in the public/files/ directory.
8
+ - Place your file with the students' emails in the private/ directory (do not commit this file to the repository).
9
+
10
+ ```bash
11
+ python encrypt_students.py --students-file private/students.json --encrypted-students-file public/files/students_encrypted.json
12
+ ```
{code β†’ apps/ai_tutor}/app.py RENAMED
@@ -8,24 +8,31 @@ from chainlit.utils import mount_chainlit
8
  import secrets
9
  import json
10
  import base64
11
- from modules.config.constants import (
12
  OAUTH_GOOGLE_CLIENT_ID,
13
  OAUTH_GOOGLE_CLIENT_SECRET,
14
  CHAINLIT_URL,
15
- GITHUB_REPO,
16
- DOCS_WEBSITE,
17
- ALL_TIME_TOKENS_ALLOCATED,
18
- TOKENS_LEFT,
19
  )
20
  from fastapi.middleware.cors import CORSMiddleware
21
  from fastapi.staticfiles import StaticFiles
22
- from modules.chat_processor.helpers import (
23
- get_user_details,
24
  get_time,
25
  reset_tokens_for_user,
26
  check_user_cooldown,
27
- update_user_info,
28
  )
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
  GOOGLE_CLIENT_ID = OAUTH_GOOGLE_CLIENT_ID
31
  GOOGLE_CLIENT_SECRET = OAUTH_GOOGLE_CLIENT_SECRET
@@ -46,13 +53,8 @@ session_store = {}
46
  CHAINLIT_PATH = "/chainlit_tutor"
47
 
48
  # only admin is given any additional permissions for now -- no limits on tokens
49
- USER_ROLES = {
50
- "[email protected]": ["instructor", "bu"],
51
- "[email protected]": ["admin", "instructor", "bu"],
52
- "[email protected]": ["instructor", "bu"],
53
- "[email protected]": ["guest"],
54
- # Add more users and roles as needed
55
- }
56
 
57
  # Create a Google OAuth flow
58
  flow = Flow.from_client_config(
@@ -80,7 +82,20 @@ flow = Flow.from_client_config(
80
 
81
 
82
  def get_user_role(username: str):
83
- return USER_ROLES.get(username, ["guest"]) # Default to "guest" role
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
 
86
  async def get_user_info_from_cookie(request: Request):
@@ -146,6 +161,11 @@ async def login_page(request: Request):
146
  # return response
147
 
148
 
 
 
 
 
 
149
  @app.get("/login/google")
150
  async def login_google(request: Request):
151
  # Clear any existing session cookies to avoid conflicts with guest sessions
@@ -176,6 +196,9 @@ async def auth_google(request: Request):
176
  profile_image = user_info.get("picture", "")
177
  role = get_user_role(email)
178
 
 
 
 
179
  session_token = secrets.token_hex(16)
180
  session_store[session_token] = {
181
  "email": email,
@@ -228,7 +251,11 @@ async def cooldown(request: Request):
228
  else:
229
  user_details.metadata["in_cooldown"] = False
230
  await update_user_info(user_details)
231
- await reset_tokens_for_user(user_details)
 
 
 
 
232
  return RedirectResponse("/post-signin")
233
 
234
 
@@ -262,7 +289,11 @@ async def post_signin(request: Request):
262
  return RedirectResponse("/cooldown")
263
  else:
264
  user_details.metadata["in_cooldown"] = False
265
- await reset_tokens_for_user(user_details)
 
 
 
 
266
 
267
  if user_info:
268
  username = user_info["email"]
@@ -335,7 +366,11 @@ async def get_tokens_left(request: Request):
335
  try:
336
  user_info = await get_user_info_from_cookie(request)
337
  user_details = await get_user_details(user_info["email"])
338
- await reset_tokens_for_user(user_details)
 
 
 
 
339
  tokens_left = user_details.metadata["tokens_left"]
340
  return {"tokens_left": tokens_left}
341
  except Exception as e:
@@ -343,7 +378,7 @@ async def get_tokens_left(request: Request):
343
  return {"tokens_left": 0}
344
 
345
 
346
- mount_chainlit(app=app, target="main.py", path=CHAINLIT_PATH)
347
 
348
  if __name__ == "__main__":
349
  import uvicorn
 
8
  import secrets
9
  import json
10
  import base64
11
+ from config.constants import (
12
  OAUTH_GOOGLE_CLIENT_ID,
13
  OAUTH_GOOGLE_CLIENT_SECRET,
14
  CHAINLIT_URL,
15
+ EMAIL_ENCRYPTION_KEY,
 
 
 
16
  )
17
  from fastapi.middleware.cors import CORSMiddleware
18
  from fastapi.staticfiles import StaticFiles
19
+ from helpers import (
 
20
  get_time,
21
  reset_tokens_for_user,
22
  check_user_cooldown,
 
23
  )
24
+ from modules.chat_processor.helpers import get_user_details, update_user_info
25
+ from config.config_manager import config_manager
26
+ import hashlib
27
+
28
+ # set config
29
+ config = config_manager.get_config().dict()
30
+
31
+ # set constants
32
+ GITHUB_REPO = config["misc"]["github_repo"]
33
+ DOCS_WEBSITE = config["misc"]["docs_website"]
34
+ ALL_TIME_TOKENS_ALLOCATED = config["token_config"]["all_time_tokens_allocated"]
35
+ TOKENS_LEFT = config["token_config"]["tokens_left"]
36
 
37
  GOOGLE_CLIENT_ID = OAUTH_GOOGLE_CLIENT_ID
38
  GOOGLE_CLIENT_SECRET = OAUTH_GOOGLE_CLIENT_SECRET
 
53
  CHAINLIT_PATH = "/chainlit_tutor"
54
 
55
  # only admin is given any additional permissions for now -- no limits on tokens
56
+ with open("public/files/students_encrypted.json", "r") as file:
57
+ USER_ROLES = json.load(file)
 
 
 
 
 
58
 
59
  # Create a Google OAuth flow
60
  flow = Flow.from_client_config(
 
82
 
83
 
84
  def get_user_role(username: str):
85
+
86
+ # Function to deterministically hash emails
87
+ def deterministic_hash(email, salt):
88
+ return hashlib.pbkdf2_hmac("sha256", email.encode(), salt, 100000).hex()
89
+
90
+ # encrypt email (#FIXME: this is not the best way to do this, not really encryption, more like a hash)
91
+ encryption_salt = EMAIL_ENCRYPTION_KEY.encode()
92
+ encrypted_email = deterministic_hash(username, encryption_salt)
93
+ role = USER_ROLES.get(encrypted_email, ["guest"])
94
+
95
+ if "guest" in role:
96
+ return "unauthorized"
97
+
98
+ return role
99
 
100
 
101
  async def get_user_info_from_cookie(request: Request):
 
161
  # return response
162
 
163
 
164
+ @app.get("/unauthorized", response_class=HTMLResponse)
165
+ async def unauthorized(request: Request):
166
+ return templates.TemplateResponse("unauthorized.html", {"request": request})
167
+
168
+
169
  @app.get("/login/google")
170
  async def login_google(request: Request):
171
  # Clear any existing session cookies to avoid conflicts with guest sessions
 
196
  profile_image = user_info.get("picture", "")
197
  role = get_user_role(email)
198
 
199
+ if role == "unauthorized":
200
+ return RedirectResponse("/unauthorized")
201
+
202
  session_token = secrets.token_hex(16)
203
  session_store[session_token] = {
204
  "email": email,
 
251
  else:
252
  user_details.metadata["in_cooldown"] = False
253
  await update_user_info(user_details)
254
+ await reset_tokens_for_user(
255
+ user_details,
256
+ config["token_config"]["tokens_left"],
257
+ config["token_config"]["regen_time"],
258
+ )
259
  return RedirectResponse("/post-signin")
260
 
261
 
 
289
  return RedirectResponse("/cooldown")
290
  else:
291
  user_details.metadata["in_cooldown"] = False
292
+ await reset_tokens_for_user(
293
+ user_details,
294
+ config["token_config"]["tokens_left"],
295
+ config["token_config"]["regen_time"],
296
+ )
297
 
298
  if user_info:
299
  username = user_info["email"]
 
366
  try:
367
  user_info = await get_user_info_from_cookie(request)
368
  user_details = await get_user_details(user_info["email"])
369
+ await reset_tokens_for_user(
370
+ user_details,
371
+ config["token_config"]["tokens_left"],
372
+ config["token_config"]["regen_time"],
373
+ )
374
  tokens_left = user_details.metadata["tokens_left"]
375
  return {"tokens_left": tokens_left}
376
  except Exception as e:
 
378
  return {"tokens_left": 0}
379
 
380
 
381
+ mount_chainlit(app=app, target="chainlit_app.py", path=CHAINLIT_PATH)
382
 
383
  if __name__ == "__main__":
384
  import uvicorn
code/main.py β†’ apps/ai_tutor/chainlit_app.py RENAMED
@@ -1,12 +1,11 @@
1
  import chainlit.data as cl_data
2
  import asyncio
3
- from modules.config.constants import (
4
  LITERAL_API_KEY_LOGGING,
5
  LITERAL_API_URL,
6
  )
7
  from modules.chat_processor.literal_ai import CustomLiteralDataLayer
8
  import json
9
- import yaml
10
  from typing import Any, Dict, no_type_check
11
  import chainlit as cl
12
  from modules.chat.llm_tutor import LLMTutor
@@ -18,11 +17,13 @@ from modules.chat.helpers import (
18
  )
19
  from modules.chat_processor.helpers import (
20
  update_user_info,
21
- get_time,
 
 
22
  check_user_cooldown,
23
  reset_tokens_for_user,
24
- get_user_details,
25
  )
 
26
  import copy
27
  from typing import Optional
28
  from chainlit.types import ThreadDict
@@ -30,6 +31,7 @@ import time
30
  import base64
31
  from langchain_community.callbacks import get_openai_callback
32
  from datetime import datetime, timezone
 
33
 
34
  USER_TIMEOUT = 60_000
35
  SYSTEM = "System"
@@ -38,8 +40,8 @@ AGENT = "Agent"
38
  YOU = "User"
39
  ERROR = "Error"
40
 
41
- with open("modules/config/config.yml", "r") as f:
42
- config = yaml.safe_load(f)
43
 
44
 
45
  async def setup_data_layer():
@@ -81,13 +83,6 @@ class Chatbot:
81
  """
82
  self.config = config
83
 
84
- async def _load_config(self):
85
- """
86
- Load the configuration from a YAML file.
87
- """
88
- with open("modules/config/config.yml", "r") as f:
89
- return yaml.safe_load(f)
90
-
91
  @no_type_check
92
  async def setup_llm(self):
93
  """
@@ -271,24 +266,24 @@ class Chatbot:
271
  print(e)
272
  return [
273
  cl.Starter(
274
- label="recording on CNNs?",
275
  message="Where can I find the recording for the lecture on Transformers?",
276
- icon="/public/adv-screen-recorder-svgrepo-com.svg",
277
  ),
278
  cl.Starter(
279
- label="where's the slides?",
280
  message="When are the lectures? I can't find the schedule.",
281
- icon="/public/alarmy-svgrepo-com.svg",
282
  ),
283
  cl.Starter(
284
  label="Due Date?",
285
  message="When is the final project due?",
286
- icon="/public/calendar-samsung-17-svgrepo-com.svg",
287
  ),
288
  cl.Starter(
289
  label="Explain backprop.",
290
  message="I didn't understand the math behind backprop, could you explain it?",
291
- icon="/public/acastusphoton-svgrepo-com.svg",
292
  ),
293
  ]
294
 
@@ -305,7 +300,7 @@ class Chatbot:
305
  rename_dict = {"Chatbot": LLM}
306
  return rename_dict.get(orig_author, orig_author)
307
 
308
- async def start(self, config=None):
309
  """
310
  Start the chatbot, initialize settings widgets,
311
  and display and load previous conversation if chat logging is enabled.
@@ -313,10 +308,6 @@ class Chatbot:
313
 
314
  start_time = time.time()
315
 
316
- self.config = (
317
- await self._load_config() if config is None else config
318
- ) # Reload the configuration on chat resume
319
-
320
  await self.make_llm_settings_widgets(self.config) # Reload the settings widgets
321
 
322
  user = cl.user_session.get("user")
@@ -386,7 +377,11 @@ class Chatbot:
386
 
387
  # update user info with last message time
388
  user = cl.user_session.get("user")
389
- await reset_tokens_for_user(user)
 
 
 
 
390
  updated_user = await get_user_details(user.identifier)
391
  user.metadata = updated_user.metadata
392
  cl.user_session.set("user", user)
@@ -530,7 +525,6 @@ class Chatbot:
530
  elements=source_elements,
531
  author=LLM,
532
  actions=actions,
533
- metadata=self.config,
534
  ).send()
535
 
536
  async def on_chat_resume(self, thread: ThreadDict):
 
1
  import chainlit.data as cl_data
2
  import asyncio
3
+ from config.constants import (
4
  LITERAL_API_KEY_LOGGING,
5
  LITERAL_API_URL,
6
  )
7
  from modules.chat_processor.literal_ai import CustomLiteralDataLayer
8
  import json
 
9
  from typing import Any, Dict, no_type_check
10
  import chainlit as cl
11
  from modules.chat.llm_tutor import LLMTutor
 
17
  )
18
  from modules.chat_processor.helpers import (
19
  update_user_info,
20
+ get_user_details,
21
+ )
22
+ from helpers import (
23
  check_user_cooldown,
24
  reset_tokens_for_user,
 
25
  )
26
+ from helpers import get_time
27
  import copy
28
  from typing import Optional
29
  from chainlit.types import ThreadDict
 
31
  import base64
32
  from langchain_community.callbacks import get_openai_callback
33
  from datetime import datetime, timezone
34
+ from config.config_manager import config_manager
35
 
36
  USER_TIMEOUT = 60_000
37
  SYSTEM = "System"
 
40
  YOU = "User"
41
  ERROR = "Error"
42
 
43
+ # set config
44
+ config = config_manager.get_config().dict()
45
 
46
 
47
  async def setup_data_layer():
 
83
  """
84
  self.config = config
85
 
 
 
 
 
 
 
 
86
  @no_type_check
87
  async def setup_llm(self):
88
  """
 
266
  print(e)
267
  return [
268
  cl.Starter(
269
+ label="recording on Transformers?",
270
  message="Where can I find the recording for the lecture on Transformers?",
271
+ icon="/public/assets/images/starter_icons/adv-screen-recorder-svgrepo-com.svg",
272
  ),
273
  cl.Starter(
274
+ label="where's the schedule?",
275
  message="When are the lectures? I can't find the schedule.",
276
+ icon="/public/assets/images/starter_icons/alarmy-svgrepo-com.svg",
277
  ),
278
  cl.Starter(
279
  label="Due Date?",
280
  message="When is the final project due?",
281
+ icon="/public/assets/images/starter_icons/calendar-samsung-17-svgrepo-com.svg",
282
  ),
283
  cl.Starter(
284
  label="Explain backprop.",
285
  message="I didn't understand the math behind backprop, could you explain it?",
286
+ icon="/public/assets/images/starter_icons/acastusphoton-svgrepo-com.svg",
287
  ),
288
  ]
289
 
 
300
  rename_dict = {"Chatbot": LLM}
301
  return rename_dict.get(orig_author, orig_author)
302
 
303
+ async def start(self):
304
  """
305
  Start the chatbot, initialize settings widgets,
306
  and display and load previous conversation if chat logging is enabled.
 
308
 
309
  start_time = time.time()
310
 
 
 
 
 
311
  await self.make_llm_settings_widgets(self.config) # Reload the settings widgets
312
 
313
  user = cl.user_session.get("user")
 
377
 
378
  # update user info with last message time
379
  user = cl.user_session.get("user")
380
+ await reset_tokens_for_user(
381
+ user,
382
+ self.config["token_config"]["tokens_left"],
383
+ self.config["token_config"]["regen_time"],
384
+ )
385
  updated_user = await get_user_details(user.identifier)
386
  user.metadata = updated_user.metadata
387
  cl.user_session.set("user", user)
 
525
  elements=source_elements,
526
  author=LLM,
527
  actions=actions,
 
528
  ).send()
529
 
530
  async def on_chat_resume(self, thread: ThreadDict):
{code/modules β†’ apps/ai_tutor}/config/config.yml RENAMED
@@ -1,15 +1,15 @@
1
- log_dir: '../storage/logs' # str
2
- log_chunk_dir: '../storage/logs/chunks' # str
3
  device: 'cpu' # str [cuda, cpu]
4
 
5
  vectorstore:
6
  load_from_HF: True # bool
7
  reparse_files: True # bool
8
- data_path: '../storage/data' # str
9
- url_file_path: '../storage/data/urls.txt' # str
10
  expand_urls: True # bool
11
  db_option : 'RAGatouille' # str [FAISS, Chroma, RAGatouille, RAPTOR]
12
- db_path : '../vectorstores' # str
13
  model : 'sentence-transformers/all-MiniLM-L6-v2' # str [sentence-transformers/all-MiniLM-L6-v2, text-embedding-ada-002']
14
  search_top_k : 3 # int
15
  score_threshold : 0.2 # float
 
1
+ log_dir: 'storage/logs' # str
2
+ log_chunk_dir: 'storage/logs/chunks' # str
3
  device: 'cpu' # str [cuda, cpu]
4
 
5
  vectorstore:
6
  load_from_HF: True # bool
7
  reparse_files: True # bool
8
+ data_path: 'storage/data' # str
9
+ url_file_path: 'storage/data/urls.txt' # str
10
  expand_urls: True # bool
11
  db_option : 'RAGatouille' # str [FAISS, Chroma, RAGatouille, RAPTOR]
12
+ db_path : 'vectorstores' # str
13
  model : 'sentence-transformers/all-MiniLM-L6-v2' # str [sentence-transformers/all-MiniLM-L6-v2, text-embedding-ada-002']
14
  search_top_k : 3 # int
15
  score_threshold : 0.2 # float
apps/ai_tutor/config/config_manager.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel, conint, confloat, HttpUrl
2
+ from typing import Optional, List
3
+ import yaml
4
+
5
+
6
+ class FaissParams(BaseModel):
7
+ index_path: str = "vectorstores/faiss.index"
8
+ index_type: str = "Flat" # Options: [Flat, HNSW, IVF]
9
+ index_dimension: conint(gt=0) = 384
10
+ index_nlist: conint(gt=0) = 100
11
+ index_nprobe: conint(gt=0) = 10
12
+
13
+
14
+ class ColbertParams(BaseModel):
15
+ index_name: str = "new_idx"
16
+
17
+
18
+ class VectorStoreConfig(BaseModel):
19
+ load_from_HF: bool = True
20
+ reparse_files: bool = True
21
+ data_path: str = "storage/data"
22
+ url_file_path: str = "storage/data/urls.txt"
23
+ expand_urls: bool = True
24
+ db_option: str = "RAGatouille" # Options: [FAISS, Chroma, RAGatouille, RAPTOR]
25
+ db_path: str = "vectorstores"
26
+ model: str = (
27
+ "sentence-transformers/all-MiniLM-L6-v2" # Options: [sentence-transformers/all-MiniLM-L6-v2, text-embedding-ada-002]
28
+ )
29
+ search_top_k: conint(gt=0) = 3
30
+ score_threshold: confloat(ge=0.0, le=1.0) = 0.2
31
+
32
+ faiss_params: Optional[FaissParams] = None
33
+ colbert_params: Optional[ColbertParams] = None
34
+
35
+
36
+ class OpenAIParams(BaseModel):
37
+ temperature: confloat(ge=0.0, le=1.0) = 0.7
38
+
39
+
40
+ class LocalLLMParams(BaseModel):
41
+ temperature: confloat(ge=0.0, le=1.0) = 0.7
42
+ repo_id: str = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF" # HuggingFace repo id
43
+ filename: str = (
44
+ "tinyllama-1.1b-chat-v1.0.Q5_0.gguf" # Specific name of gguf file in the repo
45
+ )
46
+ model_path: str = (
47
+ "storage/models/tinyllama-1.1b-chat-v1.0.Q5_0.gguf" # Path to the model file
48
+ )
49
+
50
+
51
+ class LLMParams(BaseModel):
52
+ llm_arch: str = "langchain" # Options: [langchain]
53
+ use_history: bool = True
54
+ generate_follow_up: bool = False
55
+ memory_window: conint(ge=1) = 3
56
+ llm_style: str = "Normal" # Options: [Normal, ELI5]
57
+ llm_loader: str = (
58
+ "gpt-4o-mini" # Options: [local_llm, gpt-3.5-turbo-1106, gpt-4, gpt-4o-mini]
59
+ )
60
+ openai_params: Optional[OpenAIParams] = None
61
+ local_llm_params: Optional[LocalLLMParams] = None
62
+ stream: bool = False
63
+ pdf_reader: str = "gpt" # Options: [llama, pymupdf, gpt]
64
+
65
+
66
+ class ChatLoggingConfig(BaseModel):
67
+ log_chat: bool = True
68
+ platform: str = "literalai"
69
+ callbacks: bool = True
70
+
71
+
72
+ class SplitterOptions(BaseModel):
73
+ use_splitter: bool = True
74
+ split_by_token: bool = True
75
+ remove_leftover_delimiters: bool = True
76
+ remove_chunks: bool = False
77
+ chunking_mode: str = "semantic" # Options: [fixed, semantic]
78
+ chunk_size: conint(gt=0) = 300
79
+ chunk_overlap: conint(ge=0) = 30
80
+ chunk_separators: List[str] = ["\n\n", "\n", " ", ""]
81
+ front_chunks_to_remove: Optional[conint(ge=0)] = None
82
+ last_chunks_to_remove: Optional[conint(ge=0)] = None
83
+ delimiters_to_remove: List[str] = ["\t", "\n", " ", " "]
84
+
85
+
86
+ class RetrieverConfig(BaseModel):
87
+ retriever_hf_paths: dict[str, str] = {"RAGatouille": "XThomasBU/Colbert_Index"}
88
+
89
+
90
+ class MetadataConfig(BaseModel):
91
+ metadata_links: List[HttpUrl] = [
92
+ "https://dl4ds.github.io/sp2024/lectures/",
93
+ "https://dl4ds.github.io/sp2024/schedule/",
94
+ ]
95
+ slide_base_link: HttpUrl = "https://dl4ds.github.io"
96
+
97
+
98
+ class TokenConfig(BaseModel):
99
+ cooldown_time: conint(gt=0) = 60
100
+ regen_time: conint(gt=0) = 180
101
+ tokens_left: conint(gt=0) = 2000
102
+ all_time_tokens_allocated: conint(gt=0) = 1000000
103
+
104
+
105
+ class MiscConfig(BaseModel):
106
+ github_repo: HttpUrl = "https://github.com/DL4DS/dl4ds_tutor"
107
+ docs_website: HttpUrl = "https://dl4ds.github.io/dl4ds_tutor/"
108
+
109
+
110
+ class APIConfig(BaseModel):
111
+ timeout: conint(gt=0) = 60
112
+
113
+
114
+ class Config(BaseModel):
115
+ log_dir: str = "storage/logs"
116
+ log_chunk_dir: str = "storage/logs/chunks"
117
+ device: str = "cpu" # Options: ['cuda', 'cpu']
118
+
119
+ vectorstore: VectorStoreConfig
120
+ llm_params: LLMParams
121
+ chat_logging: ChatLoggingConfig
122
+ splitter_options: SplitterOptions
123
+ retriever: RetrieverConfig
124
+ metadata: MetadataConfig
125
+ token_config: TokenConfig
126
+ misc: MiscConfig
127
+ api_config: APIConfig
128
+
129
+
130
+ class ConfigManager:
131
+ def __init__(self, config_path: str, project_config_path: str):
132
+ self.config_path = config_path
133
+ self.project_config_path = project_config_path
134
+ self.config = self.load_config()
135
+ self.validate_config()
136
+
137
+ def load_config(self) -> Config:
138
+ with open(self.config_path, "r") as f:
139
+ config_data = yaml.safe_load(f)
140
+
141
+ with open(self.project_config_path, "r") as f:
142
+ project_config_data = yaml.safe_load(f)
143
+
144
+ # Merge the two configurations
145
+ merged_config = {**config_data, **project_config_data}
146
+
147
+ return Config(**merged_config)
148
+
149
+ def get_config(self) -> Config:
150
+ return ConfigWrapper(self.config)
151
+
152
+ def validate_config(self):
153
+ # If any required fields are missing, raise an error
154
+ # required_fields = [
155
+ # "vectorstore", "llm_params", "chat_logging", "splitter_options",
156
+ # "retriever", "metadata", "token_config", "misc", "api_config"
157
+ # ]
158
+ # for field in required_fields:
159
+ # if not hasattr(self.config, field):
160
+ # raise ValueError(f"Missing required configuration field: {field}")
161
+
162
+ # # Validate types of specific fields
163
+ # if not isinstance(self.config.vectorstore, VectorStoreConfig):
164
+ # raise TypeError("vectorstore must be an instance of VectorStoreConfig")
165
+ # if not isinstance(self.config.llm_params, LLMParams):
166
+ # raise TypeError("llm_params must be an instance of LLMParams")
167
+ pass
168
+
169
+
170
+ class ConfigWrapper:
171
+ def __init__(self, config: Config):
172
+ self._config = config
173
+
174
+ def __getitem__(self, key):
175
+ return getattr(self._config, key)
176
+
177
+ def __getattr__(self, name):
178
+ return getattr(self._config, name)
179
+
180
+ def dict(self):
181
+ return self._config.dict()
182
+
183
+
184
+ # Usage
185
+ config_manager = ConfigManager(
186
+ config_path="config/config.yml", project_config_path="config/project_config.yml"
187
+ )
188
+ # config = config_manager.get_config().dict()
{code/modules β†’ apps/ai_tutor}/config/constants.py RENAMED
@@ -3,15 +3,6 @@ import os
3
 
4
  load_dotenv()
5
 
6
- TIMEOUT = 60
7
- COOLDOWN_TIME = 60
8
- REGEN_TIME = 180
9
- TOKENS_LEFT = 2000
10
- ALL_TIME_TOKENS_ALLOCATED = 1000000
11
-
12
- GITHUB_REPO = "https://github.com/DL4DS/dl4ds_tutor"
13
- DOCS_WEBSITE = "https://dl4ds.github.io/dl4ds_tutor/"
14
-
15
  # API Keys - Loaded from the .env file
16
 
17
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
@@ -20,6 +11,7 @@ HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
20
  LITERAL_API_KEY_LOGGING = os.getenv("LITERAL_API_KEY_LOGGING")
21
  LITERAL_API_URL = os.getenv("LITERAL_API_URL")
22
  CHAINLIT_URL = os.getenv("CHAINLIT_URL")
 
23
 
24
  OAUTH_GOOGLE_CLIENT_ID = os.getenv("OAUTH_GOOGLE_CLIENT_ID")
25
  OAUTH_GOOGLE_CLIENT_SECRET = os.getenv("OAUTH_GOOGLE_CLIENT_SECRET")
 
3
 
4
  load_dotenv()
5
 
 
 
 
 
 
 
 
 
 
6
  # API Keys - Loaded from the .env file
7
 
8
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 
11
  LITERAL_API_KEY_LOGGING = os.getenv("LITERAL_API_KEY_LOGGING")
12
  LITERAL_API_URL = os.getenv("LITERAL_API_URL")
13
  CHAINLIT_URL = os.getenv("CHAINLIT_URL")
14
+ EMAIL_ENCRYPTION_KEY = os.getenv("EMAIL_ENCRYPTION_KEY")
15
 
16
  OAUTH_GOOGLE_CLIENT_ID = os.getenv("OAUTH_GOOGLE_CLIENT_ID")
17
  OAUTH_GOOGLE_CLIENT_SECRET = os.getenv("OAUTH_GOOGLE_CLIENT_SECRET")
apps/ai_tutor/config/project_config.yml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ retriever:
2
+ retriever_hf_paths:
3
+ RAGatouille: "XThomasBU/Colbert_Index"
4
+
5
+ metadata:
6
+ metadata_links: ["https://dl4ds.github.io/sp2024/lectures/", "https://dl4ds.github.io/sp2024/schedule/"]
7
+ slide_base_link: "https://dl4ds.github.io"
8
+
9
+ token_config:
10
+ cooldown_time: 60
11
+ regen_time: 180
12
+ tokens_left: 2000
13
+ all_time_tokens_allocated: 1000000
14
+
15
+ misc:
16
+ github_repo: "https://github.com/DL4DS/dl4ds_tutor"
17
+ docs_website: "https://dl4ds.github.io/dl4ds_tutor/"
18
+
19
+ api_config:
20
+ timeout: 60
{code/modules β†’ apps/ai_tutor}/config/prompts.py RENAMED
File without changes
apps/ai_tutor/encrypt_students.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import hashlib
3
+ import json
4
+ import argparse
5
+ from dotenv import load_dotenv
6
+
7
+
8
# Function to deterministically hash emails
def deterministic_hash(email, salt):
    """Return a stable hex digest for an email address.

    Uses PBKDF2-HMAC-SHA256 with 100,000 iterations, so the same
    (email, salt) pair always produces the same hash — this allows role
    lookups without storing plaintext addresses.
    """
    digest = hashlib.pbkdf2_hmac("sha256", email.encode(), salt, 100000)
    return digest.hex()
11
+
12
+
13
def main(args):
    """Hash every student email in ``args.students_file`` and write the
    result to ``args.encrypted_students_file``.

    The output maps {hashed_email: [roles]}, so role lookups keep working
    without the plaintext addresses ever leaving the private file.

    Raises:
        RuntimeError: if EMAIL_ENCRYPTION_KEY is not set in the environment
            or the .env file.
    """
    # Load the .env file so EMAIL_ENCRYPTION_KEY can come from it.
    load_dotenv()

    # Get the encryption key (salt).  Fail loudly with a clear message
    # instead of crashing with AttributeError on .encode() when unset.
    raw_salt = os.getenv("EMAIL_ENCRYPTION_KEY")
    if raw_salt is None:
        raise RuntimeError(
            "EMAIL_ENCRYPTION_KEY is not set; add it to your environment or .env file."
        )
    encryption_salt = raw_salt.encode()

    # Load emails from the specified JSON file: {email: [roles]}
    with open(args.students_file, "r") as file:
        emails = json.load(file)

    # Replace emails with deterministic hashed emails, {hashed_email: [roles]}
    hashed_emails = {
        deterministic_hash(email, encryption_salt): roles
        for email, roles in emails.items()
    }

    # Save hashed emails to the specified encrypted JSON file
    with open(args.encrypted_students_file, "w") as file:
        json.dump(hashed_emails, file)
33
+
34
+
35
+ if __name__ == "__main__":
36
+ parser = argparse.ArgumentParser(
37
+ description="Encrypt student emails in a JSON file."
38
+ )
39
+ parser.add_argument(
40
+ "--students-file",
41
+ type=str,
42
+ default="private/students.json",
43
+ help="Path to the students JSON file",
44
+ )
45
+ parser.add_argument(
46
+ "--encrypted-students-file",
47
+ type=str,
48
+ default="public/files/students_encrypted.json",
49
+ help="Path to save the encrypted students JSON file",
50
+ )
51
+ args = parser.parse_args()
52
+
53
+ main(args)
apps/ai_tutor/helpers.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime, timedelta, timezone
2
+ import tiktoken
3
+ from modules.chat_processor.helpers import update_user_info, convert_to_dict
4
+
5
+
6
def get_time():
    """Return the current UTC time as an ISO 8601 string."""
    now_utc = datetime.now(tz=timezone.utc)
    return now_utc.isoformat()
8
+
9
+
10
async def check_user_cooldown(
    user_info, current_time, COOLDOWN_TIME, TOKENS_LEFT, REGEN_TIME
):
    """Check whether a user is still inside their message cooldown window.

    Args:
        user_info: user record exposing a ``metadata`` mapping; converted to
            a plain dict below via convert_to_dict.
        current_time: current time as an ISO 8601 string (see get_time).
        COOLDOWN_TIME: cooldown duration in seconds.
        TOKENS_LEFT: full token allowance, forwarded to reset_tokens_for_user.
        REGEN_TIME: seconds for a full token-regeneration cycle.

    Returns:
        (True, cooldown_end_iso) if the user must keep waiting, where the
        second element is the ISO 8601 end of the cooldown window;
        (False, None) if the user may send a message.
    """
    # Fast path: the user still has tokens and is not flagged as cooling
    # down, so no time arithmetic is needed.
    tokens_left = user_info.metadata.get("tokens_left", 0)
    if tokens_left > 0 and not user_info.metadata.get("in_cooldown", False):
        return False, None

    user_info = convert_to_dict(user_info)
    last_message_time_str = user_info["metadata"].get("last_message_time")

    # Convert from ISO format string to datetime object and ensure UTC timezone.
    # NOTE(review): .replace(tzinfo=...) relabels rather than converts — this
    # assumes stored timestamps are naive/UTC (see get_time); confirm writers
    # never store a non-UTC aware timestamp.
    last_message_time = datetime.fromisoformat(last_message_time_str).replace(
        tzinfo=timezone.utc
    )
    current_time = datetime.fromisoformat(current_time).replace(tzinfo=timezone.utc)

    # Calculate the elapsed time
    elapsed_time = current_time - last_message_time
    elapsed_time_in_seconds = elapsed_time.total_seconds()

    # Calculate when the cooldown period ends
    cooldown_end_time = last_message_time + timedelta(seconds=COOLDOWN_TIME)
    cooldown_end_time_iso = cooldown_end_time.isoformat()

    # Debug: Print the cooldown end time
    print(f"Cooldown end time (ISO): {cooldown_end_time_iso}")

    # Check if the user is still in cooldown
    if elapsed_time_in_seconds < COOLDOWN_TIME:
        return True, cooldown_end_time_iso  # Return in ISO 8601 format

    user_info["metadata"]["in_cooldown"] = False
    # If not in cooldown, regenerate tokens
    await reset_tokens_for_user(user_info, TOKENS_LEFT, REGEN_TIME)

    return False, None
47
+
48
+
49
async def reset_tokens_for_user(user_info, TOKENS_LEFT, REGEN_TIME):
    """Regenerate a user's tokens proportionally to the time elapsed since
    their last message, capped at their maximum allowance.

    Args:
        user_info: user record; converted to a plain dict below.
        TOKENS_LEFT: default full allowance — used both as the cap on the
            carried-over balance and as the fallback ``max_tokens``.
        REGEN_TIME: seconds needed to regenerate a full allowance.
    """
    user_info = convert_to_dict(user_info)
    last_message_time_str = user_info["metadata"].get("last_message_time")

    # NOTE(review): .replace(tzinfo=...) relabels a naive timestamp as UTC;
    # this assumes timestamps were stored in UTC (see get_time) — confirm.
    last_message_time = datetime.fromisoformat(last_message_time_str).replace(
        tzinfo=timezone.utc
    )
    current_time = datetime.fromisoformat(get_time()).replace(tzinfo=timezone.utc)

    # Calculate the elapsed time since the last message
    elapsed_time_in_seconds = (current_time - last_message_time).total_seconds()

    # Current token count (can be negative)
    current_tokens = user_info["metadata"].get("tokens_left_at_last_message", 0)
    current_tokens = min(current_tokens, TOKENS_LEFT)

    # Maximum tokens that can be regenerated
    max_tokens = user_info["metadata"].get("max_tokens", TOKENS_LEFT)

    # Calculate how many tokens should have been regenerated proportionally
    if current_tokens < max_tokens:
        # Calculate the regeneration rate per second based on REGEN_TIME for full regeneration
        regeneration_rate_per_second = max_tokens / REGEN_TIME

        # Calculate how many tokens should have been regenerated based on the elapsed time
        tokens_to_regenerate = int(
            elapsed_time_in_seconds * regeneration_rate_per_second
        )

        # Ensure the new token count does not exceed max_tokens
        new_token_count = min(current_tokens + tokens_to_regenerate, max_tokens)

        print(
            f"\n\n Adding {tokens_to_regenerate} tokens to the user, Time elapsed: {elapsed_time_in_seconds} seconds, Tokens after regeneration: {new_token_count}, Tokens before: {current_tokens} \n\n"
        )

        # Update the user's token count
        user_info["metadata"]["tokens_left"] = new_token_count

        # NOTE(review): when current_tokens >= max_tokens nothing is persisted
        # at all, so metadata changes made by the caller (e.g. clearing
        # in_cooldown in check_user_cooldown) are silently dropped — verify
        # this is intended.
        await update_user_info(user_info)
89
+
90
+
91
def get_num_tokens(text, model):
    """Count how many tokens ``text`` encodes to under ``model``'s tokenizer.

    Args:
        text: the string to tokenize.
        model: model name passed to tiktoken.encoding_for_model.
    """
    return len(tiktoken.encoding_for_model(model).encode(text))
apps/ai_tutor/private/placeholder_students_file.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "[email protected]": ["student", "bu"],
3
+ "[email protected]": ["student", "bu"],
4
+ "[email protected]": ["admin", "instructor", "bu"]
5
+ }
code/public/avatars/ai_tutor.png β†’ apps/ai_tutor/public/assets/images/avatars/ai-tutor.png RENAMED
File without changes
code/public/logo_dark.png β†’ apps/ai_tutor/public/assets/images/avatars/ai_tutor.png RENAMED
File without changes
{code/public β†’ apps/ai_tutor/public/assets/images/starter_icons}/acastusphoton-svgrepo-com.svg RENAMED
File without changes
{code/public β†’ apps/ai_tutor/public/assets/images/starter_icons}/adv-screen-recorder-svgrepo-com.svg RENAMED
File without changes
{code/public β†’ apps/ai_tutor/public/assets/images/starter_icons}/alarmy-svgrepo-com.svg RENAMED
File without changes
{code/public β†’ apps/ai_tutor/public/assets/images/starter_icons}/calendar-samsung-17-svgrepo-com.svg RENAMED
File without changes
apps/ai_tutor/public/files/students_encrypted.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"7f1cacca66ee914ddde2ee20e0f2c96651d60cd8aabd310ef25a9e6d88f42df0": ["instructor", "bu"], "f74d264b6b5b2b4c10ce69e4ec16e869e01cb5eb668ed846aa8f6dae5c96cda0": ["admin", "instructor", "bu"], "53401356a874b1539775c73a8564d5e5f4f840441630c9cf649e16d201454f20": ["instructor", "bu"]}
apps/ai_tutor/public/files/test.css ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ a[href*='https://github.com/Chainlit/chainlit'] {
2
+ visibility: hidden;
3
+ }
4
+
5
+ /* Hide the default avatar image */
6
+ .MuiAvatar-root img.MuiAvatar-img {
7
+ display: none;
8
+ }
9
+
10
+ /* Target the container of the image and set a custom background image */
11
+ .MuiAvatar-root.MuiAvatar-circular.css-m2icte {
12
+ background-image: url('/public/assets/images/avatars/ai-tutor.png'); /* Replace with your custom image URL */
13
+ background-size: cover; /* Ensure the image covers the entire container */
14
+ background-position: center; /* Center the image */
15
+ width: 100px; /* Ensure the dimensions match the original */
16
+ height: 100px; /* Ensure the dimensions match the original */
17
+ border-radius: 50%; /* Maintain circular shape */
18
+ }
19
+ .MuiAvatar-root.MuiAvatar-circular.css-v72an7 {
20
+ background-image: url('/public/assets/images/avatars/ai-tutor.png'); /* Replace with your custom image URL */
21
+ background-size: cover; /* Ensure the image covers the entire container */
22
+ background-position: center; /* Center the image */
23
+ width: 40px; /* Ensure the dimensions match the original */
24
+ height: 40px; /* Ensure the dimensions match the original */
25
+ border-radius: 50%; /* Maintain circular shape */
26
+ }
27
+
28
+ .MuiStack-root.css-14k6mw7 img {
29
+ content: url('/public/assets/images/avatars/ai-tutor.png'); /* Replace with the path to your custom image */
30
+ max-height: 45px; /* Ensure the height remains consistent */
31
+ max-width: 45px; /* Ensure the width remains consistent */
32
+ }
code/public/logo_light.png β†’ apps/ai_tutor/public/logo_dark.png RENAMED
File without changes
apps/ai_tutor/public/logo_light.png ADDED
{storage β†’ apps/ai_tutor/storage}/data/urls.txt RENAMED
File without changes
{code β†’ apps/ai_tutor}/templates/cooldown.html RENAMED
@@ -121,7 +121,7 @@
121
  </head>
122
  <body>
123
  <div class="container">
124
- <img src="/public/avatars/ai_tutor.png" alt="AI Tutor Avatar" class="avatar">
125
  <h1>Hello, {{ username }}</h1>
126
  <p>It seems like you need to wait a bit before starting a new session.</p>
127
  <p class="cooldown-message">Time remaining until the cooldown period ends:</p>
 
121
  </head>
122
  <body>
123
  <div class="container">
124
+ <img src="/public/assets/images/avatars/ai-tutor.png" alt="AI Tutor Avatar" class="avatar">
125
  <h1>Hello, {{ username }}</h1>
126
  <p>It seems like you need to wait a bit before starting a new session.</p>
127
  <p class="cooldown-message">Time remaining until the cooldown period ends:</p>
{code β†’ apps/ai_tutor}/templates/dashboard.html RENAMED
@@ -123,7 +123,7 @@
123
  </head>
124
  <body>
125
  <div class="container">
126
- <img src="/public/avatars/ai_tutor.png" alt="AI Tutor Avatar" class="avatar">
127
  <h1>Welcome, {{ username }}</h1>
128
  <p>Ready to start your AI tutoring session?</p>
129
  <p class="tokens-left">Tokens Left: {{ tokens_left }}</p>
 
123
  </head>
124
  <body>
125
  <div class="container">
126
+ <img src="/public/assets/images/avatars/ai-tutor.png" alt="AI Tutor Avatar" class="avatar">
127
  <h1>Welcome, {{ username }}</h1>
128
  <p>Ready to start your AI tutoring session?</p>
129
  <p class="tokens-left">Tokens Left: {{ tokens_left }}</p>
{code β†’ apps/ai_tutor}/templates/error.html RENAMED
File without changes
{code β†’ apps/ai_tutor}/templates/error_404.html RENAMED
File without changes
{code β†’ apps/ai_tutor}/templates/login.html RENAMED
@@ -107,7 +107,7 @@
107
  </head>
108
  <body>
109
  <div class="container">
110
- <img src="/public/avatars/ai_tutor.png" alt="AI Tutor Avatar" class="avatar">
111
  <h1>Terrier Tutor</h1>
112
  <p>Welcome to the DS598 AI Tutor. Please sign in to continue.</p>
113
  <form action="/login/google" method="get">
 
107
  </head>
108
  <body>
109
  <div class="container">
110
+ <img src="/public/assets/images/avatars/ai-tutor.png" alt="AI Tutor Avatar" class="avatar">
111
  <h1>Terrier Tutor</h1>
112
  <p>Welcome to the DS598 AI Tutor. Please sign in to continue.</p>
113
  <form action="/login/google" method="get">
{code β†’ apps/ai_tutor}/templates/logout.html RENAMED
File without changes
apps/ai_tutor/templates/unauthorized.html ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Access Restricted</title>
7
+ <style>
8
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');
9
+
10
+ body, html {
11
+ margin: 0;
12
+ padding: 0;
13
+ font-family: 'Inter', sans-serif;
14
+ background-color: #f7f7f7; /* Light gray background */
15
+ background-image: url('https://www.transparenttextures.com/patterns/cubes.png'); /* Subtle geometric pattern */
16
+ background-repeat: repeat;
17
+ display: flex;
18
+ align-items: center;
19
+ justify-content: center;
20
+ height: 100vh;
21
+ color: #333;
22
+ }
23
+
24
+ .container {
25
+ background: rgba(255, 255, 255, 0.9);
26
+ border: 1px solid #ddd;
27
+ border-radius: 8px;
28
+ width: 100%;
29
+ max-width: 400px;
30
+ padding: 50px;
31
+ box-sizing: border-box;
32
+ text-align: center;
33
+ box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1);
34
+ backdrop-filter: blur(10px);
35
+ -webkit-backdrop-filter: blur(10px);
36
+ }
37
+
38
+ .avatar {
39
+ width: 90px;
40
+ height: 90px;
41
+ border-radius: 50%;
42
+ margin-bottom: 25px;
43
+ border: 2px solid #ddd;
44
+ }
45
+
46
+ .container h1 {
47
+ margin-bottom: 20px;
48
+ font-size: 26px;
49
+ font-weight: 600;
50
+ color: #1a1a1a;
51
+ }
52
+
53
+ .container p {
54
+ font-size: 18px;
55
+ color: #4a4a4a;
56
+ margin-bottom: 35px;
57
+ line-height: 1.5;
58
+ }
59
+
60
+ .button {
61
+ padding: 14px 0;
62
+ margin: 12px 0;
63
+ font-size: 16px;
64
+ border-radius: 6px;
65
+ cursor: pointer;
66
+ width: 100%;
67
+ border: 1px solid #ccc;
68
+ background-color: #007BFF;
69
+ color: #fff;
70
+ transition: background-color 0.3s ease, border-color 0.3s ease;
71
+ }
72
+
73
+ .button:hover {
74
+ background-color: #0056b3;
75
+ border-color: #0056b3;
76
+ }
77
+ </style>
78
+ </head>
79
+ <body>
80
+ <div class="container">
81
+ <img src="/public/assets/images/avatars/ai-tutor.png" alt="AI Tutor Avatar" class="avatar">
82
+ <h1>Access Restricted</h1>
83
+ <p>
84
+ We're currently testing things out for the <strong>DS598</strong> course.
85
+ Access is restricted to students of the course. If you're enrolled in <strong>DS598</strong> and seeing this message,
86
+ please reach out to us, and we'll help you get access.<br><br>
87
+ <em>P.S. Don't forget to use your BU email when logging in!</em>
88
+ </p>
89
+ <form action="/" method="get">
90
+ <button type="submit" class="button">Return to Home</button>
91
+ </form>
92
+ </div>
93
+ </body>
94
+ </html>
apps/chainlit_base/.chainlit/config.toml ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ # Whether to enable telemetry (default: true). No personal data is collected.
3
+ enable_telemetry = true
4
+
5
+
6
+ # List of environment variables to be provided by each user to use the app.
7
+ user_env = []
8
+
9
+ # Duration (in seconds) during which the session is saved when the connection is lost
10
+ session_timeout = 3600
11
+
12
+ # Enable third parties caching (e.g LangChain cache)
13
+ cache = false
14
+
15
+ # Authorized origins
16
+ allow_origins = ["*"]
17
+
18
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
19
+ # follow_symlink = false
20
+
21
+ [features]
22
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
23
+ unsafe_allow_html = false
24
+
25
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
26
+ latex = false
27
+
28
+ # Automatically tag threads with the current chat profile (if a chat profile is used)
29
+ auto_tag_thread = true
30
+
31
+ # Authorize users to spontaneously upload files with messages
32
+ [features.spontaneous_file_upload]
33
+ enabled = true
34
+ accept = ["*/*"]
35
+ max_files = 20
36
+ max_size_mb = 500
37
+
38
+ [features.audio]
39
+ # Threshold for audio recording
40
+ min_decibels = -45
41
+ # Delay for the user to start speaking in MS
42
+ initial_silence_timeout = 3000
43
+ # Delay for the user to continue speaking in MS. If the user stops speaking for this duration, the recording will stop.
44
+ silence_timeout = 1500
45
+ # Above this duration (MS), the recording will forcefully stop.
46
+ max_duration = 15000
47
+ # Duration of the audio chunks in MS
48
+ chunk_duration = 1000
49
+ # Sample rate of the audio
50
+ sample_rate = 44100
51
+
52
+ edit_message = true
53
+
54
+ [UI]
55
+ # Name of the assistant.
56
+ name = "Assistant"
57
+
58
+ # Description of the assistant. This is used for HTML tags.
59
+ # description = ""
60
+
61
+ # Large size content are by default collapsed for a cleaner ui
62
+ default_collapse_content = true
63
+
64
+ # Chain of Thought (CoT) display mode. Can be "hidden", "tool_call" or "full".
65
+ cot = "full"
66
+
67
+ # Link to your github repo. This will add a github button in the UI's header.
68
+ # github = ""
69
+
70
+ # Specify a CSS file that can be used to customize the user interface.
71
+ # The CSS file can be served from the public directory or via an external link.
72
+ custom_css = "/public/files/test.css"
73
+
74
+ # Specify a Javascript file that can be used to customize the user interface.
75
+ # The Javascript file can be served from the public directory.
76
+ # custom_js = "/public/test.js"
77
+
78
+ # Specify a custom font url.
79
+ # custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap"
80
+
81
+ # Specify a custom meta image url.
82
+ # custom_meta_image_url = "https://chainlit-cloud.s3.eu-west-3.amazonaws.com/logo/chainlit_banner.png"
83
+
84
+ # Specify a custom build directory for the frontend.
85
+ # This can be used to customize the frontend code.
86
+ # Be careful: If this is a relative path, it should not start with a slash.
87
+ # custom_build = "./public/build"
88
+
89
+ [UI.theme]
90
+ default = "dark"
91
+ #layout = "wide"
92
+ #font_family = "Inter, sans-serif"
93
+ # Override default MUI light theme. (Check theme.ts)
94
+ [UI.theme.light]
95
+ #background = "#FAFAFA"
96
+ #paper = "#FFFFFF"
97
+
98
+ [UI.theme.light.primary]
99
+ #main = "#F80061"
100
+ #dark = "#980039"
101
+ #light = "#FFE7EB"
102
+ [UI.theme.light.text]
103
+ #primary = "#212121"
104
+ #secondary = "#616161"
105
+
106
+ # Override default MUI dark theme. (Check theme.ts)
107
+ [UI.theme.dark]
108
+ #background = "#FAFAFA"
109
+ #paper = "#FFFFFF"
110
+
111
+ [UI.theme.dark.primary]
112
+ #main = "#F80061"
113
+ #dark = "#980039"
114
+ #light = "#FFE7EB"
115
+ [UI.theme.dark.text]
116
+ #primary = "#EEEEEE"
117
+ #secondary = "#BDBDBD"
118
+
119
+ [meta]
120
+ generated_by = "1.1.402"
apps/chainlit_base/chainlit.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Welcome to Chainlit! πŸš€πŸ€–
2
+
3
+ Hi there, Developer! πŸ‘‹ We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.
4
+
5
+ ## Useful Links πŸ”—
6
+
7
+ - **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) πŸ“š
8
+ - **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! πŸ’¬
9
+
10
+ We can't wait to see what you create with Chainlit! Happy coding! πŸ’»πŸ˜Š
11
+
12
+ ## Welcome screen
13
+
14
+ To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.
{code β†’ apps/chainlit_base}/chainlit_base.py RENAMED
@@ -1,19 +1,16 @@
1
  import chainlit.data as cl_data
2
  import asyncio
3
- import yaml
4
  from typing import Any, Dict, no_type_check
5
  import chainlit as cl
6
  from modules.chat.llm_tutor import LLMTutor
7
  from modules.chat.helpers import (
8
  get_sources,
9
- get_history_chat_resume,
10
  get_history_setup_llm,
11
- get_last_config,
12
  )
13
  import copy
14
- from chainlit.types import ThreadDict
15
  import time
16
  from langchain_community.callbacks import get_openai_callback
 
17
 
18
  USER_TIMEOUT = 60_000
19
  SYSTEM = "System"
@@ -22,22 +19,7 @@ AGENT = "Agent"
22
  YOU = "User"
23
  ERROR = "Error"
24
 
25
- with open("modules/config/config.yml", "r") as f:
26
- config = yaml.safe_load(f)
27
-
28
-
29
- # async def setup_data_layer():
30
- # """
31
- # Set up the data layer for chat logging.
32
- # """
33
- # if config["chat_logging"]["log_chat"]:
34
- # data_layer = CustomLiteralDataLayer(
35
- # api_key=LITERAL_API_KEY_LOGGING, server=LITERAL_API_URL
36
- # )
37
- # else:
38
- # data_layer = None
39
-
40
- # return data_layer
41
 
42
 
43
  class Chatbot:
@@ -47,13 +29,6 @@ class Chatbot:
47
  """
48
  self.config = config
49
 
50
- async def _load_config(self):
51
- """
52
- Load the configuration from a YAML file.
53
- """
54
- with open("modules/config/config.yml", "r") as f:
55
- return yaml.safe_load(f)
56
-
57
  @no_type_check
58
  async def setup_llm(self):
59
  """
@@ -225,38 +200,29 @@ class Chatbot:
225
  """
226
  Set starter messages for the chatbot.
227
  """
228
- # Return Starters only if the chat is new
229
-
230
- try:
231
- thread = cl_data._data_layer.get_thread(
232
- cl.context.session.thread_id
233
- ) # see if the thread has any steps
234
- if thread.steps or len(thread.steps) > 0:
235
- return None
236
- except Exception as e:
237
- print(e)
238
- return [
239
- cl.Starter(
240
- label="recording on CNNs?",
241
- message="Where can I find the recording for the lecture on Transformers?",
242
- icon="/public/adv-screen-recorder-svgrepo-com.svg",
243
- ),
244
- cl.Starter(
245
- label="where's the slides?",
246
- message="When are the lectures? I can't find the schedule.",
247
- icon="/public/alarmy-svgrepo-com.svg",
248
- ),
249
- cl.Starter(
250
- label="Due Date?",
251
- message="When is the final project due?",
252
- icon="/public/calendar-samsung-17-svgrepo-com.svg",
253
- ),
254
- cl.Starter(
255
- label="Explain backprop.",
256
- message="I didn't understand the math behind backprop, could you explain it?",
257
- icon="/public/acastusphoton-svgrepo-com.svg",
258
- ),
259
- ]
260
 
261
  def rename(self, orig_author: str):
262
  """
@@ -271,7 +237,7 @@ class Chatbot:
271
  rename_dict = {"Chatbot": LLM}
272
  return rename_dict.get(orig_author, orig_author)
273
 
274
- async def start(self, config=None):
275
  """
276
  Start the chatbot, initialize settings widgets,
277
  and display and load previous conversation if chat logging is enabled.
@@ -279,26 +245,13 @@ class Chatbot:
279
 
280
  start_time = time.time()
281
 
282
- self.config = (
283
- await self._load_config() if config is None else config
284
- ) # Reload the configuration on chat resume
285
-
286
  await self.make_llm_settings_widgets(self.config) # Reload the settings widgets
287
 
288
- user = cl.user_session.get("user")
289
-
290
  # TODO: remove self.user with cl.user_session.get("user")
291
- try:
292
- self.user = {
293
- "user_id": user.identifier,
294
- "session_id": cl.context.session.thread_id,
295
- }
296
- except Exception as e:
297
- print(e)
298
- self.user = {
299
- "user_id": "guest",
300
- "session_id": cl.context.session.thread_id,
301
- }
302
 
303
  memory = cl.user_session.get("memory", [])
304
  self.llm_tutor = LLMTutor(self.config, user=self.user)
@@ -432,22 +385,8 @@ class Chatbot:
432
  elements=source_elements,
433
  author=LLM,
434
  actions=actions,
435
- metadata=self.config,
436
  ).send()
437
 
438
- async def on_chat_resume(self, thread: ThreadDict):
439
- thread_config = None
440
- steps = thread["steps"]
441
- k = self.config["llm_params"][
442
- "memory_window"
443
- ] # on resume, alwyas use the default memory window
444
- conversation_list = get_history_chat_resume(steps, k, SYSTEM, LLM)
445
- thread_config = get_last_config(
446
- steps
447
- ) # TODO: Returns None for now - which causes config to be reloaded with default values
448
- cl.user_session.set("memory", conversation_list)
449
- await self.start(config=thread_config)
450
-
451
  async def on_follow_up(self, action: cl.Action):
452
  user = cl.user_session.get("user")
453
  message = await cl.Message(
@@ -466,12 +405,9 @@ chatbot = Chatbot(config=config)
466
 
467
 
468
  async def start_app():
469
- # cl_data._data_layer = await setup_data_layer()
470
- # chatbot.literal_client = cl_data._data_layer.client if cl_data._data_layer else None
471
  cl.set_starters(chatbot.set_starters)
472
  cl.author_rename(chatbot.rename)
473
  cl.on_chat_start(chatbot.start)
474
- cl.on_chat_resume(chatbot.on_chat_resume)
475
  cl.on_message(chatbot.main)
476
  cl.on_settings_update(chatbot.update_llm)
477
  cl.action_callback("follow up question")(chatbot.on_follow_up)
 
1
  import chainlit.data as cl_data
2
  import asyncio
 
3
  from typing import Any, Dict, no_type_check
4
  import chainlit as cl
5
  from modules.chat.llm_tutor import LLMTutor
6
  from modules.chat.helpers import (
7
  get_sources,
 
8
  get_history_setup_llm,
 
9
  )
10
  import copy
 
11
  import time
12
  from langchain_community.callbacks import get_openai_callback
13
+ from config.config_manager import config_manager
14
 
15
  USER_TIMEOUT = 60_000
16
  SYSTEM = "System"
 
19
  YOU = "User"
20
  ERROR = "Error"
21
 
22
+ config = config_manager.get_config().dict()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
 
25
  class Chatbot:
 
29
  """
30
  self.config = config
31
 
 
 
 
 
 
 
 
32
  @no_type_check
33
  async def setup_llm(self):
34
  """
 
200
  """
201
  Set starter messages for the chatbot.
202
  """
203
+
204
+ return [
205
+ cl.Starter(
206
+ label="recording on Transformers?",
207
+ message="Where can I find the recording for the lecture on Transformers?",
208
+ icon="/public/assets/images/starter_icons/adv-screen-recorder-svgrepo-com.svg",
209
+ ),
210
+ cl.Starter(
211
+ label="where's the slides?",
212
+ message="When are the lectures? I can't find the schedule.",
213
+ icon="/public/assets/images/starter_icons/alarmy-svgrepo-com.svg",
214
+ ),
215
+ cl.Starter(
216
+ label="Due Date?",
217
+ message="When is the final project due?",
218
+ icon="/public/assets/images/starter_icons/calendar-samsung-17-svgrepo-com.svg",
219
+ ),
220
+ cl.Starter(
221
+ label="Explain backprop.",
222
+ message="I didn't understand the math behind backprop, could you explain it?",
223
+ icon="/public/assets/images/starter_icons/acastusphoton-svgrepo-com.svg",
224
+ ),
225
+ ]
 
 
 
 
 
 
 
 
 
226
 
227
  def rename(self, orig_author: str):
228
  """
 
237
  rename_dict = {"Chatbot": LLM}
238
  return rename_dict.get(orig_author, orig_author)
239
 
240
+ async def start(self):
241
  """
242
  Start the chatbot, initialize settings widgets,
243
  and display and load previous conversation if chat logging is enabled.
 
245
 
246
  start_time = time.time()
247
 
 
 
 
 
248
  await self.make_llm_settings_widgets(self.config) # Reload the settings widgets
249
 
 
 
250
  # TODO: remove self.user with cl.user_session.get("user")
251
+ self.user = {
252
+ "user_id": "guest",
253
+ "session_id": cl.context.session.thread_id,
254
+ }
 
 
 
 
 
 
 
255
 
256
  memory = cl.user_session.get("memory", [])
257
  self.llm_tutor = LLMTutor(self.config, user=self.user)
 
385
  elements=source_elements,
386
  author=LLM,
387
  actions=actions,
 
388
  ).send()
389
 
 
 
 
 
 
 
 
 
 
 
 
 
 
390
  async def on_follow_up(self, action: cl.Action):
391
  user = cl.user_session.get("user")
392
  message = await cl.Message(
 
405
 
406
 
407
  async def start_app():
 
 
408
  cl.set_starters(chatbot.set_starters)
409
  cl.author_rename(chatbot.rename)
410
  cl.on_chat_start(chatbot.start)
 
411
  cl.on_message(chatbot.main)
412
  cl.on_settings_update(chatbot.update_llm)
413
  cl.action_callback("follow up question")(chatbot.on_follow_up)
apps/chainlit_base/config/config.yml ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ log_dir: 'storage/logs' # str
2
+ log_chunk_dir: 'storage/logs/chunks' # str
3
+ device: 'cpu' # str [cuda, cpu]
4
+
5
+ vectorstore:
6
+ load_from_HF: True # bool
7
+ reparse_files: True # bool
8
+ data_path: 'storage/data' # str
9
+ url_file_path: 'storage/data/urls.txt' # str
10
+ expand_urls: True # bool
11
+ db_option : 'RAGatouille' # str [FAISS, Chroma, RAGatouille, RAPTOR]
12
+ db_path : 'vectorstores' # str
13
+ model : 'sentence-transformers/all-MiniLM-L6-v2' # str [sentence-transformers/all-MiniLM-L6-v2, text-embedding-ada-002']
14
+ search_top_k : 3 # int
15
+ score_threshold : 0.2 # float
16
+
17
+ faiss_params: # Not used as of now
18
+ index_path: 'vectorstores/faiss.index' # str
19
+ index_type: 'Flat' # str [Flat, HNSW, IVF]
20
+ index_dimension: 384 # int
21
+ index_nlist: 100 # int
22
+ index_nprobe: 10 # int
23
+
24
+ colbert_params:
25
+ index_name: "new_idx" # str
26
+
27
+ llm_params:
28
+ llm_arch: 'langchain' # [langchain]
29
+ use_history: True # bool
30
+ generate_follow_up: False # bool
31
+ memory_window: 3 # int
32
+ llm_style: 'Normal' # str [Normal, ELI5]
33
+ llm_loader: 'gpt-4o-mini' # str [local_llm, gpt-3.5-turbo-1106, gpt-4, gpt-4o-mini]
34
+ openai_params:
35
+ temperature: 0.7 # float
36
+ local_llm_params:
37
+ temperature: 0.7 # float
38
+ repo_id: 'TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF' # HuggingFace repo id
39
+ filename: 'tinyllama-1.1b-chat-v1.0.Q5_0.gguf' # Specific name of gguf file in the repo
40
+ model_path: 'storage/models/tinyllama-1.1b-chat-v1.0.Q5_0.gguf' # Path to the model file
41
+ stream: False # bool
42
+ pdf_reader: 'gpt' # str [llama, pymupdf, gpt]
43
+
44
+ chat_logging:
45
+ log_chat: True # bool
46
+ platform: 'literalai'
47
+ callbacks: True # bool
48
+
49
+ splitter_options:
50
+ use_splitter: True # bool
51
+ split_by_token : True # bool
52
+ remove_leftover_delimiters: True # bool
53
+ remove_chunks: False # bool
54
+ chunking_mode: 'semantic' # str [fixed, semantic]
55
+ chunk_size : 300 # int
56
+ chunk_overlap : 30 # int
57
+ chunk_separators : ["\n\n", "\n", " ", ""] # list of strings
58
+ front_chunks_to_remove : null # int or None
59
+ last_chunks_to_remove : null # int or None
60
+ delimiters_to_remove : ['\t', '\n', ' ', ' '] # list of strings
apps/chainlit_base/config/config_manager.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel, conint, confloat, HttpUrl
2
+ from typing import Optional, List
3
+ import yaml
4
+
5
+
6
+ class FaissParams(BaseModel):
7
+ index_path: str = "vectorstores/faiss.index"
8
+ index_type: str = "Flat" # Options: [Flat, HNSW, IVF]
9
+ index_dimension: conint(gt=0) = 384
10
+ index_nlist: conint(gt=0) = 100
11
+ index_nprobe: conint(gt=0) = 10
12
+
13
+
14
+ class ColbertParams(BaseModel):
15
+ index_name: str = "new_idx"
16
+
17
+
18
+ class VectorStoreConfig(BaseModel):
19
+ load_from_HF: bool = True
20
+ reparse_files: bool = True
21
+ data_path: str = "storage/data"
22
+ url_file_path: str = "storage/data/urls.txt"
23
+ expand_urls: bool = True
24
+ db_option: str = "RAGatouille" # Options: [FAISS, Chroma, RAGatouille, RAPTOR]
25
+ db_path: str = "vectorstores"
26
+ model: str = (
27
+ "sentence-transformers/all-MiniLM-L6-v2" # Options: [sentence-transformers/all-MiniLM-L6-v2, text-embedding-ada-002]
28
+ )
29
+ search_top_k: conint(gt=0) = 3
30
+ score_threshold: confloat(ge=0.0, le=1.0) = 0.2
31
+
32
+ faiss_params: Optional[FaissParams] = None
33
+ colbert_params: Optional[ColbertParams] = None
34
+
35
+
36
+ class OpenAIParams(BaseModel):
37
+ temperature: confloat(ge=0.0, le=1.0) = 0.7
38
+
39
+
40
+ class LocalLLMParams(BaseModel):
41
+ temperature: confloat(ge=0.0, le=1.0) = 0.7
42
+ repo_id: str = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF" # HuggingFace repo id
43
+ filename: str = (
44
+ "tinyllama-1.1b-chat-v1.0.Q5_0.gguf" # Specific name of gguf file in the repo
45
+ )
46
+ model_path: str = (
47
+ "storage/models/tinyllama-1.1b-chat-v1.0.Q5_0.gguf" # Path to the model file
48
+ )
49
+
50
+
51
+ class LLMParams(BaseModel):
52
+ llm_arch: str = "langchain" # Options: [langchain]
53
+ use_history: bool = True
54
+ generate_follow_up: bool = False
55
+ memory_window: conint(ge=1) = 3
56
+ llm_style: str = "Normal" # Options: [Normal, ELI5]
57
+ llm_loader: str = (
58
+ "gpt-4o-mini" # Options: [local_llm, gpt-3.5-turbo-1106, gpt-4, gpt-4o-mini]
59
+ )
60
+ openai_params: Optional[OpenAIParams] = None
61
+ local_llm_params: Optional[LocalLLMParams] = None
62
+ stream: bool = False
63
+ pdf_reader: str = "gpt" # Options: [llama, pymupdf, gpt]
64
+
65
+
66
+ class ChatLoggingConfig(BaseModel):
67
+ log_chat: bool = True
68
+ platform: str = "literalai"
69
+ callbacks: bool = True
70
+
71
+
72
+ class SplitterOptions(BaseModel):
73
+ use_splitter: bool = True
74
+ split_by_token: bool = True
75
+ remove_leftover_delimiters: bool = True
76
+ remove_chunks: bool = False
77
+ chunking_mode: str = "semantic" # Options: [fixed, semantic]
78
+ chunk_size: conint(gt=0) = 300
79
+ chunk_overlap: conint(ge=0) = 30
80
+ chunk_separators: List[str] = ["\n\n", "\n", " ", ""]
81
+ front_chunks_to_remove: Optional[conint(ge=0)] = None
82
+ last_chunks_to_remove: Optional[conint(ge=0)] = None
83
+ delimiters_to_remove: List[str] = ["\t", "\n", " ", " "]
84
+
85
+
86
+ class RetrieverConfig(BaseModel):
87
+ retriever_hf_paths: dict[str, str] = {"RAGatouille": "XThomasBU/Colbert_Index"}
88
+
89
+
90
+ class MetadataConfig(BaseModel):
91
+ metadata_links: List[HttpUrl] = [
92
+ "https://dl4ds.github.io/sp2024/lectures/",
93
+ "https://dl4ds.github.io/sp2024/schedule/",
94
+ ]
95
+ slide_base_link: HttpUrl = "https://dl4ds.github.io"
96
+
97
+
98
+ class APIConfig(BaseModel):
99
+ timeout: conint(gt=0) = 60
100
+
101
+
102
+ class Config(BaseModel):
103
+ log_dir: str = "storage/logs"
104
+ log_chunk_dir: str = "storage/logs/chunks"
105
+ device: str = "cpu" # Options: ['cuda', 'cpu']
106
+
107
+ vectorstore: VectorStoreConfig
108
+ llm_params: LLMParams
109
+ chat_logging: ChatLoggingConfig
110
+ splitter_options: SplitterOptions
111
+ retriever: RetrieverConfig
112
+ metadata: MetadataConfig
113
+ api_config: APIConfig
114
+
115
+
116
+ class ConfigManager:
117
+ def __init__(self, config_path: str, project_config_path: str):
118
+ self.config_path = config_path
119
+ self.project_config_path = project_config_path
120
+ self.config = self.load_config()
121
+ self.validate_config()
122
+
123
+ def load_config(self) -> Config:
124
+ with open(self.config_path, "r") as f:
125
+ config_data = yaml.safe_load(f)
126
+
127
+ with open(self.project_config_path, "r") as f:
128
+ project_config_data = yaml.safe_load(f)
129
+
130
+ # Merge the two configurations
131
+ merged_config = {**config_data, **project_config_data}
132
+
133
+ return Config(**merged_config)
134
+
135
+ def get_config(self) -> Config:
136
+ return ConfigWrapper(self.config)
137
+
138
+ def validate_config(self):
139
+ # If any required fields are missing, raise an error
140
+ # required_fields = [
141
+ # "vectorstore", "llm_params", "chat_logging", "splitter_options",
142
+ # "retriever", "metadata", "token_config", "misc", "api_config"
143
+ # ]
144
+ # for field in required_fields:
145
+ # if not hasattr(self.config, field):
146
+ # raise ValueError(f"Missing required configuration field: {field}")
147
+
148
+ # # Validate types of specific fields
149
+ # if not isinstance(self.config.vectorstore, VectorStoreConfig):
150
+ # raise TypeError("vectorstore must be an instance of VectorStoreConfig")
151
+ # if not isinstance(self.config.llm_params, LLMParams):
152
+ # raise TypeError("llm_params must be an instance of LLMParams")
153
+ pass
154
+
155
+
156
+ class ConfigWrapper:
157
+ def __init__(self, config: Config):
158
+ self._config = config
159
+
160
+ def __getitem__(self, key):
161
+ return getattr(self._config, key)
162
+
163
+ def __getattr__(self, name):
164
+ return getattr(self._config, name)
165
+
166
+ def dict(self):
167
+ return self._config.dict()
168
+
169
+
170
+ # Usage
171
+ config_manager = ConfigManager(
172
+ config_path="config/config.yml", project_config_path="config/project_config.yml"
173
+ )
174
+ # config = config_manager.get_config().dict()
{code/modules β†’ apps/chainlit_base}/config/project_config.yml RENAMED
@@ -4,4 +4,7 @@ retriever:
4
 
5
  metadata:
6
  metadata_links: ["https://dl4ds.github.io/sp2024/lectures/", "https://dl4ds.github.io/sp2024/schedule/"]
7
- slide_base_link: "https://dl4ds.github.io"
 
 
 
 
4
 
5
  metadata:
6
  metadata_links: ["https://dl4ds.github.io/sp2024/lectures/", "https://dl4ds.github.io/sp2024/schedule/"]
7
+ slide_base_link: "https://dl4ds.github.io"
8
+
9
+ api_config:
10
+ timeout: 60
apps/chainlit_base/config/prompts.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ prompts = {
2
+ "openai": {
3
+ "rephrase_prompt": (
4
+ "You are someone that rephrases statements. Rephrase the student's question to add context from their chat history if relevant, ensuring it remains from the student's point of view. "
5
+ "Incorporate relevant details from the chat history to make the question clearer and more specific. "
6
+ "Do not change the meaning of the original statement, and maintain the student's tone and perspective. "
7
+ "If the question is conversational and doesn't require context, do not rephrase it. "
8
+ "Example: If the student previously asked about backpropagation in the context of deep learning and now asks 'what is it', rephrase to 'What is backpropagation.'. "
9
+ "Example: Do not rephrase if the user is asking something specific like 'cool, suggest a project with transformers to use as my final project' "
10
+ "Chat history: \n{chat_history}\n"
11
+ "Rephrase the following question only if necessary: '{input}'"
12
+ "Rephrased Question:'"
13
+ ),
14
+ "prompt_with_history": {
15
+ "normal": (
16
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
17
+ "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
18
+ "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata. Use the source context that is most relevant. "
19
+ "Render math equations in LaTeX format between $ or $$ signs, stick to the parameter and variable icons found in your context. Be sure to explain the parameters and variables in the equations."
20
+ "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n\n"
21
+ "Do not get influenced by the style of conversation in the chat history. Follow the instructions given here."
22
+ "Chat History:\n{chat_history}\n\n"
23
+ "Context:\n{context}\n\n"
24
+ "Answer the student's question below in a friendly, concise, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
25
+ "Student: {input}\n"
26
+ "AI Tutor:"
27
+ ),
28
+ "eli5": (
29
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Your job is to explain things in the simplest and most engaging way possible, just like the 'Explain Like I'm 5' (ELI5) concept."
30
+ "If you don't know the answer, do your best without making things up. Keep your explanations straightforward and very easy to understand."
31
+ "Use the chat history and context to help you, but avoid repeating past responses. Provide links from the source_file metadata when they're helpful."
32
+ "Use very simple language and examples to explain any math equations, and put the equations in LaTeX format between $ or $$ signs."
33
+ "Be friendly and engaging, like you're chatting with a young child who's curious and eager to learn. Avoid complex terms and jargon."
34
+ "Include simple and clear examples wherever you can to make things easier to understand."
35
+ "Do not get influenced by the style of conversation in the chat history. Follow the instructions given here."
36
+ "Chat History:\n{chat_history}\n\n"
37
+ "Context:\n{context}\n\n"
38
+ "Answer the student's question below in a friendly, simple, and engaging way, just like the ELI5 concept. Use the context and history only if they're relevant, otherwise, just have a natural conversation."
39
+ "Give a clear and detailed explanation with simple examples to make it easier to understand. Remember, your goal is to break down complex topics into very simple terms, just like ELI5."
40
+ "Student: {input}\n"
41
+ "AI Tutor:"
42
+ ),
43
+ "socratic": (
44
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Engage the student in a Socratic dialogue to help them discover answers on their own. Use the provided context to guide your questioning."
45
+ "If you don't know the answer, do your best without making things up. Keep the conversation engaging and inquisitive."
46
+ "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata when relevant. Use the source context that is most relevant."
47
+ "Speak in a friendly and engaging manner, encouraging critical thinking and self-discovery."
48
+ "Use questions to lead the student to explore the topic and uncover answers."
49
+ "Chat History:\n{chat_history}\n\n"
50
+ "Context:\n{context}\n\n"
51
+ "Answer the student's question below by guiding them through a series of questions and insights that lead to deeper understanding. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation."
52
+ "Foster an inquisitive mindset and help the student discover answers through dialogue."
53
+ "Student: {input}\n"
54
+ "AI Tutor:"
55
+ ),
56
+ },
57
+ "prompt_no_history": (
58
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
59
+ "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
60
+ "Provide links from the source_file metadata. Use the source context that is most relevant. "
61
+ "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n\n"
62
+ "Context:\n{context}\n\n"
63
+ "Answer the student's question below in a friendly, concise, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
64
+ "Student: {input}\n"
65
+ "AI Tutor:"
66
+ ),
67
+ },
68
+ "tiny_llama": {
69
+ "prompt_no_history": (
70
+ "system\n"
71
+ "Assistant is an intelligent chatbot designed to help students with questions regarding the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance.\n"
72
+ "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally.\n"
73
+ "Provide links from the source_file metadata. Use the source context that is most relevant.\n"
74
+ "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n"
75
+ "\n\n"
76
+ "user\n"
77
+ "Context:\n{context}\n\n"
78
+ "Question: {input}\n"
79
+ "\n\n"
80
+ "assistant"
81
+ ),
82
+ "prompt_with_history": (
83
+ "system\n"
84
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
85
+ "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
86
+ "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata. Use the source context that is most relevant. "
87
+ "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n"
88
+ "\n\n"
89
+ "user\n"
90
+ "Chat History:\n{chat_history}\n\n"
91
+ "Context:\n{context}\n\n"
92
+ "Question: {input}\n"
93
+ "\n\n"
94
+ "assistant"
95
+ ),
96
+ },
97
+ }
apps/chainlit_base/public/assets/images/avatars/ai-tutor.png ADDED
apps/chainlit_base/public/assets/images/avatars/ai_tutor.png ADDED
apps/chainlit_base/public/assets/images/starter_icons/acastusphoton-svgrepo-com.svg ADDED
apps/chainlit_base/public/assets/images/starter_icons/adv-screen-recorder-svgrepo-com.svg ADDED
apps/chainlit_base/public/assets/images/starter_icons/alarmy-svgrepo-com.svg ADDED
apps/chainlit_base/public/assets/images/starter_icons/calendar-samsung-17-svgrepo-com.svg ADDED
apps/chainlit_base/public/files/students_encrypted.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"7f1cacca66ee914ddde2ee20e0f2c96651d60cd8aabd310ef25a9e6d88f42df0": ["instructor", "bu"], "f74d264b6b5b2b4c10ce69e4ec16e869e01cb5eb668ed846aa8f6dae5c96cda0": ["admin", "instructor", "bu"], "53401356a874b1539775c73a8564d5e5f4f840441630c9cf649e16d201454f20": ["instructor", "bu"]}
{code/public β†’ apps/chainlit_base/public/files}/test.css RENAMED
@@ -2,20 +2,23 @@ a[href*='https://github.com/Chainlit/chainlit'] {
2
  visibility: hidden;
3
  }
4
 
5
-
 
 
 
 
 
6
  /* Target the container of the image and set a custom background image */
7
  .MuiAvatar-root.MuiAvatar-circular.css-m2icte {
8
- background-image: url('/public/avatars/ai-tutor.png'); /* Replace with your custom image URL */
9
  background-size: cover; /* Ensure the image covers the entire container */
10
  background-position: center; /* Center the image */
11
  width: 100px; /* Ensure the dimensions match the original */
12
  height: 100px; /* Ensure the dimensions match the original */
13
  border-radius: 50%; /* Maintain circular shape */
14
  }
15
-
16
-
17
  .MuiAvatar-root.MuiAvatar-circular.css-v72an7 {
18
- background-image: url('/public/avatars/ai-tutor.png'); /* Replace with your custom image URL */
19
  background-size: cover; /* Ensure the image covers the entire container */
20
  background-position: center; /* Center the image */
21
  width: 40px; /* Ensure the dimensions match the original */
 
2
  visibility: hidden;
3
  }
4
 
5
+ /* TODO: Default image is still shown in the starter questions */
6
+ /* Hide the default avatar image */
7
+ .MuiAvatar-root img.MuiAvatar-img {
8
+ display: none;
9
+ }
10
+
11
  /* Target the container of the image and set a custom background image */
12
  .MuiAvatar-root.MuiAvatar-circular.css-m2icte {
13
+ background-image: url('/public/assets/images/avatars/ai-tutor.png'); /* Replace with your custom image URL */
14
  background-size: cover; /* Ensure the image covers the entire container */
15
  background-position: center; /* Center the image */
16
  width: 100px; /* Ensure the dimensions match the original */
17
  height: 100px; /* Ensure the dimensions match the original */
18
  border-radius: 50%; /* Maintain circular shape */
19
  }
 
 
20
  .MuiAvatar-root.MuiAvatar-circular.css-v72an7 {
21
+ background-image: url('/public/assets/images/avatars/ai-tutor.png'); /* Replace with your custom image URL */
22
  background-size: cover; /* Ensure the image covers the entire container */
23
  background-position: center; /* Center the image */
24
  width: 40px; /* Ensure the dimensions match the original */
apps/chainlit_base/public/logo_dark.png ADDED