Upload 34 files

- .gitignore +50 -0
- CMD_FLAGS.txt +4 -0
- Colab-TextGen-GPU.ipynb +130 -0
- LICENSE +661 -0
- README.md +428 -13
- cmd_linux.sh +22 -0
- cmd_macos.sh +24 -0
- cmd_windows.bat +34 -0
- cmd_wsl.bat +11 -0
- convert-to-safetensors.py +38 -0
- download-model.py +313 -0
- one_click.py +399 -0
- requirements.txt +67 -0
- requirements_amd.txt +45 -0
- requirements_amd_noavx2.txt +43 -0
- requirements_apple_intel.txt +37 -0
- requirements_apple_silicon.txt +39 -0
- requirements_cpu_only.txt +35 -0
- requirements_cpu_only_noavx2.txt +35 -0
- requirements_noavx2.txt +67 -0
- requirements_nowheels.txt +29 -0
- server.py +262 -0
- settings-template.yaml +74 -0
- settings.yaml +8 -0
- setup.cfg +3 -0
- start_linux.sh +67 -0
- start_macos.sh +67 -0
- start_windows.bat +84 -0
- start_wsl.bat +11 -0
- update_linux.sh +26 -0
- update_macos.sh +26 -0
- update_windows.bat +37 -0
- update_wsl.bat +11 -0
- wsl.sh +112 -0
.gitignore
ADDED
@@ -0,0 +1,50 @@
/cache
/characters
/css
/extensions
/grammars
/installer_files
/logs
/loras
/models
/presets
/prompts
/repositories
/softprompts
/torch-dumps
/training/datasets

/CMD_FLAGS.txt
/img_bot*
/img_me*
/models/config-user.yaml
/notification.mp3
/settings*.json
/settings*.yaml

.chroma
.DS_Store
.eslintrc.js
.idea
.venv
venv
.envrc
.direnv
.vs
.vscode
*.bak
*.ipynb
*.log
*pycache*
cert.pem
key.pem
package.json
package-lock.json
Thumbs.db
wandb

# ignore user docker config and top level links to docker files
/docker-compose.yaml
/docker-compose.yml
/Dockerfile
.env
CMD_FLAGS.txt
ADDED
@@ -0,0 +1,4 @@
# Only used by the one-click installer.
# Example:
#--listen
--api --api-port 9500 --verbose --character Devia
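For reference, here is a minimal sketch of how a launcher can consume a flags file like the one above: blank lines and `#` comments are skipped, and the remaining lines are joined into extra arguments for the server command. This is an illustration, not the exact parsing done by `one_click.py`; the helper name `read_cmd_flags` is introduced here for the example.

```python
# Illustrative sketch: collect flags from CMD_FLAGS.txt, ignoring comments.
from pathlib import Path

def read_cmd_flags(path: str = "CMD_FLAGS.txt") -> str:
    lines = Path(path).read_text().splitlines()
    kept = [line.strip() for line in lines
            if line.strip() and not line.strip().startswith("#")]
    return " ".join(kept)

# For the file above, this returns:
# "--api --api-port 9500 --verbose --character Devia"
```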
Colab-TextGen-GPU.ipynb
ADDED
@@ -0,0 +1,130 @@
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "private_outputs": true,
      "provenance": [],
      "gpuType": "T4"
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "source": [
        "# oobabooga/text-generation-webui\n",
        "\n",
        "After running both cells, a public gradio URL will appear at the bottom in a few minutes. You can optionally generate an API link.\n",
        "\n",
        "* Project page: https://github.com/oobabooga/text-generation-webui\n",
        "* Gradio server status: https://status.gradio.app/"
      ],
      "metadata": {
        "id": "MFQl6-FjSYtY"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "#@title 1. Keep this tab alive to prevent Colab from disconnecting you { display-mode: \"form\" }\n",
        "\n",
        "#@markdown Press play on the music player that will appear below:\n",
        "%%html\n",
        "<audio src=\"https://oobabooga.github.io/silence.m4a\" controls>"
      ],
      "metadata": {
        "id": "f7TVVj_z4flw"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "#@title 2. Launch the web UI\n",
        "\n",
        "#@markdown If unsure about the branch, write \"main\" or leave it blank.\n",
        "\n",
        "import torch\n",
        "from pathlib import Path\n",
        "\n",
        "if Path.cwd().name != 'text-generation-webui':\n",
        "  print(\"Installing the webui...\")\n",
        "\n",
        "  !git clone https://github.com/oobabooga/text-generation-webui\n",
        "  %cd text-generation-webui\n",
        "\n",
        "  torver = torch.__version__\n",
        "  print(f\"TORCH: {torver}\")\n",
        "  is_cuda118 = '+cu118' in torver  # 2.1.0+cu118\n",
        "\n",
        "  textgen_requirements = open('requirements.txt').read().splitlines()\n",
        "  if is_cuda118:\n",
        "    textgen_requirements = [req.replace('+cu121', '+cu118').replace('+cu122', '+cu118') for req in textgen_requirements]\n",
        "  with open('temp_requirements.txt', 'w') as file:\n",
        "    file.write('\\n'.join(textgen_requirements))\n",
        "\n",
        "  !pip install -r extensions/openai/requirements.txt --upgrade\n",
        "  !pip install -r temp_requirements.txt --upgrade\n",
        "\n",
        "  print(\"\\033[1;32;1m\\n --> If you see a warning about \\\"previously imported packages\\\", just ignore it.\\033[0;37;0m\")\n",
        "  print(\"\\033[1;32;1m\\n --> There is no need to restart the runtime.\\n\\033[0;37;0m\")\n",
        "\n",
        "  try:\n",
        "    import flash_attn\n",
        "  except:\n",
        "    !pip uninstall -y flash_attn\n",
        "\n",
        "# Parameters\n",
        "model_url = \"https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ\" #@param {type:\"string\"}\n",
        "branch = \"gptq-4bit-32g-actorder_True\" #@param {type:\"string\"}\n",
        "command_line_flags = \"--n-gpu-layers 128 --load-in-4bit --use_double_quant\" #@param {type:\"string\"}\n",
        "api = False #@param {type:\"boolean\"}\n",
        "\n",
        "if api:\n",
        "  for param in ['--api', '--public-api']:\n",
        "    if param not in command_line_flags:\n",
        "      command_line_flags += f\" {param}\"\n",
        "\n",
        "model_url = model_url.strip()\n",
        "if model_url != \"\":\n",
        "  if not model_url.startswith('http'):\n",
        "    model_url = 'https://huggingface.co/' + model_url\n",
        "\n",
        "  # Download the model\n",
        "  url_parts = model_url.strip('/').strip().split('/')\n",
        "  output_folder = f\"{url_parts[-2]}_{url_parts[-1]}\"\n",
        "  branch = branch.strip('\"\\' ')\n",
        "  if branch.strip() not in ['', 'main']:\n",
        "    output_folder += f\"_{branch}\"\n",
        "    !python download-model.py {model_url} --branch {branch}\n",
        "  else:\n",
        "    !python download-model.py {model_url}\n",
        "else:\n",
        "  output_folder = \"\"\n",
        "\n",
        "# Start the web UI\n",
        "cmd = f\"python server.py --share\"\n",
        "if output_folder != \"\":\n",
        "  cmd += f\" --model {output_folder}\"\n",
        "cmd += f\" {command_line_flags}\"\n",
        "print(cmd)\n",
        "!$cmd"
      ],
      "metadata": {
        "id": "LGQ8BiMuXMDG",
        "cellView": "form"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}
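The launch cell above derives the local model folder from the repository URL and branch. As a standalone restatement of that logic (the helper name `output_folder_for` is introduced here for illustration only):

```python
# Sketch of the notebook's naming rule: "creator/model" becomes "creator_model",
# and a branch other than "" or "main" is appended with an underscore.
def output_folder_for(model_url: str, branch: str = "") -> str:
    parts = model_url.strip().strip("/").split("/")
    folder = f"{parts[-2]}_{parts[-1]}"
    branch = branch.strip("\"' ")
    if branch not in ("", "main"):
        folder += f"_{branch}"
    return folder

print(output_folder_for("https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ",
                        "gptq-4bit-32g-actorder_True"))
# -> TheBloke_MythoMax-L2-13B-GPTQ_gptq-4bit-32g-actorder_True
```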
LICENSE
ADDED
@@ -0,0 +1,661 @@
                    GNU AFFERO GENERAL PUBLIC LICENSE
                       Version 3, 19 November 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works.  By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.

  A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate.  Many developers of free software are heartened and
encouraged by the resulting cooperation.  However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.

  The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community.  It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server.  Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.

  An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals.  This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU Affero General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License.  Each licensee is addressed as "you".  "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy.  The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy.  Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies.  Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License.  If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it.  "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form.  A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities.  However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work.  For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met.  This License explicitly affirms your unlimited
permission to run the unmodified Program.  The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work.  This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force.  You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright.  Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below.  Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7.  This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy.  This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged.  This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit.  Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source.  This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge.  You need not require recipients to copy the
    Corresponding Source along with the object code.  If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source.  Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

  A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

  A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling.  In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage.  For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product.  A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

  "Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source.  The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

  If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information.  But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

  The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed.  Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

  Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

  "Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law.  If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

  When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it.  (Additional permissions may be written to require their own
removal in certain cases when you modify the work.)  You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

  Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

  All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10.  If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term.  If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

  If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

  Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

  You may not propagate or modify a covered work except as expressly
provided under this License.  Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

  However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

  Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

  Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License.  If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

  You are not required to accept this License in order to receive or
run a copy of the Program.  Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance.  However,
nothing other than this License grants you permission to propagate or
modify any covered work.  These actions infringe copyright if you do
not accept this License.  Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

  Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License.  You are not responsible
for enforcing compliance by third parties with this License.

  An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations.  If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

  You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License.  For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

  A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based.  The
work thus licensed is called the contributor's "contributor version".

  A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version.  For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

  Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

  In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement).  To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

  If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients.  "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

  If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

  A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License.  You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

  Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

  If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all.  For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Remote Network Interaction; Use with the GNU General Public License.

  Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software.  This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.

  Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work.  The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.

  14. Revised Versions of this License.

  The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time.  Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

  Each version is given a distinguishing version number.  If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation.  If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.

  If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions.  However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License as published
    by the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

  If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source.  For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code.  There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
README.md
CHANGED
@@ -1,13 +1,428 @@
# Text generation web UI

A Gradio web UI for Large Language Models.

Its goal is to become the [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) of text generation.

|![Image1](https://github.com/oobabooga/screenshots/raw/main/print_instruct.png) | ![Image2](https://github.com/oobabooga/screenshots/raw/main/print_chat.png) |
|:---:|:---:|
|![Image1](https://github.com/oobabooga/screenshots/raw/main/print_default.png) | ![Image2](https://github.com/oobabooga/screenshots/raw/main/print_parameters.png) |

## Features

* 3 interface modes: default (two columns), notebook, and chat.
* Multiple model backends: [Transformers](https://github.com/huggingface/transformers), [llama.cpp](https://github.com/ggerganov/llama.cpp) (through [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)), [ExLlamaV2](https://github.com/turboderp/exllamav2), [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ), [AutoAWQ](https://github.com/casper-hansen/AutoAWQ), [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa), [CTransformers](https://github.com/marella/ctransformers), [QuIP#](https://github.com/Cornell-RelaxML/quip-sharp).
* Dropdown menu for quickly switching between different models.
* Large number of extensions (built-in and user-contributed), including Coqui TTS for realistic voice outputs, Whisper STT for voice inputs, translation, [multimodal pipelines](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/multimodal), vector databases, Stable Diffusion integration, and a lot more. See [the wiki](https://github.com/oobabooga/text-generation-webui/wiki/07-%E2%80%90-Extensions) and [the extensions directory](https://github.com/oobabooga/text-generation-webui-extensions) for details.
* [Chat with custom characters](https://github.com/oobabooga/text-generation-webui/wiki/03-%E2%80%90-Parameters-Tab#character).
* Precise chat templates for instruction-following models, including Llama-2-chat, Alpaca, Vicuna, and Mistral.
* LoRA: train new LoRAs with your own data, load/unload LoRAs on the fly for generation.
* Transformers library integration: load models in 4-bit or 8-bit precision through bitsandbytes, use llama.cpp with transformers samplers (`llamacpp_HF` loader), CPU inference in 32-bit precision using PyTorch.
* OpenAI-compatible API server with Chat and Completions endpoints -- see the [examples](https://github.com/oobabooga/text-generation-webui/wiki/12-%E2%80%90-OpenAI-API#examples) and the request sketch after this list.

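As a minimal sketch, a chat-completion request against the OpenAI-compatible endpoint might look like this. It assumes the server was started with `--api` and is listening on the default port 5000 (this repository's `CMD_FLAGS.txt` overrides it to 9500, so adjust accordingly); the wiki examples linked above are the authoritative reference.

```python
# Minimal sketch: one chat completion against the local OpenAI-compatible API.
# Assumes the server was started with --api (default API port: 5000).
import requests

response = requests.post(
    "http://127.0.0.1:5000/v1/chat/completions",
    json={
        "messages": [{"role": "user", "content": "Say hello in five words."}],
        "mode": "instruct",  # project-specific extension parameter
        "max_tokens": 64,
    },
)
print(response.json()["choices"][0]["message"]["content"])
```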
23 |
+
## How to install
|
24 |
+
|
25 |
+
1) Clone or [download](https://github.com/oobabooga/text-generation-webui/archive/refs/heads/main.zip) the repository.
|
26 |
+
2) Run the `start_linux.sh`, `start_windows.bat`, `start_macos.sh`, or `start_wsl.bat` script depending on your OS.
|
27 |
+
3) Select your GPU vendor when asked.
|
28 |
+
4) Once the installation ends, browse to `http://localhost:7860/?__theme=dark`.
|
29 |
+
5) Have fun!
|
30 |
+
|
31 |
+
To restart the web UI in the future, just run the `start_` script again. This script creates an `installer_files` folder where it sets up the project's requirements. In case you need to reinstall the requirements, you can simply delete that folder and start the web UI again.
|
32 |
+
|
33 |
+
The script accepts command-line flags. Alternatively, you can edit the `CMD_FLAGS.txt` file with a text editor and add your flags there.
|
34 |
+
|
35 |
+
To get updates in the future, run `update_linux.sh`, `update_windows.bat`, `update_macos.sh`, or `update_wsl.bat`.

<details>
<summary>
Setup details and information about installing manually
</summary>

### One-click-installer

The script uses Miniconda to set up a Conda environment in the `installer_files` folder.

If you ever need to install something manually in the `installer_files` environment, you can launch an interactive shell using the cmd script: `cmd_linux.sh`, `cmd_windows.bat`, `cmd_macos.sh`, or `cmd_wsl.bat`.

* There is no need to run any of those scripts (`start_`, `update_`, or `cmd_`) as admin/root.
* For additional instructions about AMD and WSL setup, consult [the documentation](https://github.com/oobabooga/text-generation-webui/wiki).
* For automated installation, you can use the `GPU_CHOICE`, `USE_CUDA118`, `LAUNCH_AFTER_INSTALL`, and `INSTALL_EXTENSIONS` environment variables. For instance: `GPU_CHOICE=A USE_CUDA118=FALSE LAUNCH_AFTER_INSTALL=FALSE INSTALL_EXTENSIONS=FALSE ./start_linux.sh`.

### Manual installation using Conda

Recommended if you have some experience with the command line.

#### 0. Install Conda

https://docs.conda.io/en/latest/miniconda.html

On Linux or WSL, it can be automatically installed with these two commands ([source](https://educe-ubc.github.io/conda.html)):

```
curl -sL "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh" > "Miniconda3.sh"
bash Miniconda3.sh
```

#### 1. Create a new conda environment

```
conda create -n textgen python=3.11
conda activate textgen
```

#### 2. Install PyTorch

| System | GPU | Command |
|--------|---------|---------|
| Linux/WSL | NVIDIA | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121` |
| Linux/WSL | CPU only | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu` |
| Linux | AMD | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.6` |
| MacOS + MPS | Any | `pip3 install torch torchvision torchaudio` |
| Windows | NVIDIA | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121` |
| Windows | CPU only | `pip3 install torch torchvision torchaudio` |

The up-to-date commands can be found here: https://pytorch.org/get-started/locally/.

For NVIDIA, you also need to install the CUDA runtime libraries:

```
conda install -y -c "nvidia/label/cuda-12.1.1" cuda-runtime
```

If you need `nvcc` to compile some library manually, replace the command above with:

```
conda install -y -c "nvidia/label/cuda-12.1.1" cuda
```

#### 3. Install the web UI

```
git clone https://github.com/oobabooga/text-generation-webui
cd text-generation-webui
pip install -r <requirements file according to table below>
```

Requirements file to use:

| GPU | CPU | requirements file to use |
|--------|---------|---------|
| NVIDIA | has AVX2 | `requirements.txt` |
| NVIDIA | no AVX2 | `requirements_noavx2.txt` |
| AMD | has AVX2 | `requirements_amd.txt` |
| AMD | no AVX2 | `requirements_amd_noavx2.txt` |
| CPU only | has AVX2 | `requirements_cpu_only.txt` |
| CPU only | no AVX2 | `requirements_cpu_only_noavx2.txt` |
| Apple | Intel | `requirements_apple_intel.txt` |
| Apple | Apple Silicon | `requirements_apple_silicon.txt` |

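If you are unsure whether your CPU supports AVX2, you can run the same `py-cpuinfo` probe that the one-click installer (`one_click.py`, included further down) uses:

```
# Requires: pip install py-cpuinfo
# Prints True if the CPU advertises the AVX2 instruction set.
import cpuinfo

print('avx2' in cpuinfo.get_cpu_info()['flags'])
```
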
### Start the web UI

```
conda activate textgen
cd text-generation-webui
python server.py
```

Then browse to `http://localhost:7860/?__theme=dark`.

##### AMD GPU on Windows

1) Use `requirements_cpu_only.txt` or `requirements_cpu_only_noavx2.txt` in the command above.

2) Manually install llama-cpp-python using the appropriate command for your hardware: [Installation from PyPI](https://github.com/abetlen/llama-cpp-python#installation-with-hardware-acceleration).
   * Use the `LLAMA_HIPBLAS=on` toggle.
   * Note the [Windows remarks](https://github.com/abetlen/llama-cpp-python#windows-remarks).

3) Manually install AutoGPTQ: [Installation](https://github.com/PanQiWei/AutoGPTQ#install-from-source).
   * Perform the from-source installation - there are no prebuilt ROCm packages for Windows.

##### Older NVIDIA GPUs

1) For Kepler GPUs and older, you will need to install CUDA 11.8 instead of 12:

```
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
conda install -y -c "nvidia/label/cuda-11.8.0" cuda-runtime
```

2) bitsandbytes >= 0.39 may not work. In that case, to use `--load-in-8bit`, you may have to downgrade like this:
   * Linux: `pip install bitsandbytes==0.38.1`
   * Windows: `pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl`

##### Manual install

The `requirements*.txt` files above contain various wheels precompiled through GitHub Actions. If you wish to compile things manually, or if no suitable wheels are available for your hardware, you can use `requirements_nowheels.txt` and then install your desired loaders manually.

### Alternative: Docker

```
For NVIDIA GPU:
ln -s docker/{nvidia/Dockerfile,nvidia/docker-compose.yml,.dockerignore} .
For AMD GPU:
ln -s docker/{amd/Dockerfile,amd/docker-compose.yml,.dockerignore} .
For Intel GPU:
ln -s docker/{intel/Dockerfile,intel/docker-compose.yml,.dockerignore} .
For CPU only:
ln -s docker/{cpu/Dockerfile,cpu/docker-compose.yml,.dockerignore} .
cp docker/.env.example .env
# Create logs/cache dirs:
mkdir -p logs cache
# Edit .env and set:
#   TORCH_CUDA_ARCH_LIST based on your GPU model
#   APP_RUNTIME_GID      your host user's group id (run `id -g` in a terminal)
#   BUILD_EXTENSIONS     optionally add a comma-separated list of extensions to build
# Edit CMD_FLAGS.txt and add in it the options you want to execute (like --listen --cpu)
#
docker compose up --build
```

* You need to have Docker Compose v2.17 or higher installed. See [this guide](https://github.com/oobabooga/text-generation-webui/wiki/09-%E2%80%90-Docker) for instructions.
* For additional docker files, check out [this repository](https://github.com/Atinoda/text-generation-webui-docker).

### Updating the requirements

From time to time, the `requirements*.txt` files change. To update, use these commands:

```
conda activate textgen
cd text-generation-webui
pip install -r <requirements file that you have used> --upgrade
```
</details>

<details>
<summary>
List of command-line flags
</summary>

#### Basic settings

| Flag | Description |
|--------------------------------------------|-------------|
| `-h`, `--help` | show this help message and exit |
| `--multi-user` | Multi-user mode. Chat histories are not saved or automatically loaded. WARNING: this is likely not safe for sharing publicly. |
| `--character CHARACTER` | The name of the character to load in chat mode by default. |
| `--model MODEL` | Name of the model to load by default. |
| `--lora LORA [LORA ...]` | The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces. |
| `--model-dir MODEL_DIR` | Path to directory with all the models. |
| `--lora-dir LORA_DIR` | Path to directory with all the loras. |
| `--model-menu` | Show a model menu in the terminal when the web UI is first launched. |
| `--settings SETTINGS_FILE` | Load the default interface settings from this yaml file. See `settings-template.yaml` for an example. If you create a file called `settings.yaml`, this file will be loaded by default without the need to use the `--settings` flag. |
| `--extensions EXTENSIONS [EXTENSIONS ...]` | The list of extensions to load. If you want to load more than one extension, write the names separated by spaces. |
| `--verbose` | Print the prompts to the terminal. |
| `--chat-buttons` | Show buttons on the chat tab instead of a hover menu. |

#### Model loader

| Flag | Description |
|--------------------------------------------|-------------|
| `--loader LOADER` | Choose the model loader manually; otherwise, it will get autodetected. Valid options: Transformers, llama.cpp, llamacpp_HF, ExLlamav2_HF, ExLlamav2, AutoGPTQ, AutoAWQ, GPTQ-for-LLaMa, ctransformers, QuIP#. |

#### Accelerate/transformers

| Flag | Description |
|---------------------------------------------|-------------|
| `--cpu` | Use the CPU to generate text. Warning: Training on CPU is extremely slow. |
| `--auto-devices` | Automatically split the model across the available GPU(s) and CPU. |
| `--gpu-memory GPU_MEMORY [GPU_MEMORY ...]` | Maximum GPU memory in GiB to be allocated per GPU. Example: --gpu-memory 10 for a single GPU, --gpu-memory 10 5 for two GPUs. You can also set values in MiB like --gpu-memory 3500MiB. |
| `--cpu-memory CPU_MEMORY` | Maximum CPU memory in GiB to allocate for offloaded weights. Same as above. |
| `--disk` | If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk. |
| `--disk-cache-dir DISK_CACHE_DIR` | Directory to save the disk cache to. Defaults to "cache". |
| `--load-in-8bit` | Load the model with 8-bit precision (using bitsandbytes). |
| `--bf16` | Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU. |
| `--no-cache` | Set `use_cache` to `False` while generating text. This reduces VRAM usage slightly, but it comes at a performance cost. |
| `--trust-remote-code` | Set `trust_remote_code=True` while loading the model. Necessary for some models. |
| `--no_use_fast` | Set use_fast=False while loading the tokenizer (it's True by default). Use this if you have any problems related to use_fast. |
| `--use_flash_attention_2` | Set use_flash_attention_2=True while loading the model. |

#### bitsandbytes 4-bit

⚠️ Requires a GPU with a minimum compute capability of 7.0 on Windows at the moment.

| Flag | Description |
|---------------------------------------------|-------------|
| `--load-in-4bit` | Load the model with 4-bit precision (using bitsandbytes). |
| `--use_double_quant` | use_double_quant for 4-bit. |
| `--compute_dtype COMPUTE_DTYPE` | compute dtype for 4-bit. Valid options: bfloat16, float16, float32. |
| `--quant_type QUANT_TYPE` | quant_type for 4-bit. Valid options: nf4, fp4. |

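Putting these together, a typical 4-bit NF4 launch with bfloat16 compute could look like this (the model name is a placeholder):

```
python server.py --model <your-model> --load-in-4bit --compute_dtype bfloat16 --quant_type nf4
```
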
#### llama.cpp

| Flag | Description |
|-------------|-------------|
| `--tensorcores` | Use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards. NVIDIA only. |
| `--n_ctx N_CTX` | Size of the prompt context. |
| `--threads` | Number of threads to use. |
| `--threads-batch THREADS_BATCH` | Number of threads to use for batches/prompt processing. |
| `--no_mul_mat_q` | Disable the mulmat kernels. |
| `--n_batch` | Maximum number of prompt tokens to batch together when calling llama_eval. |
| `--no-mmap` | Prevent mmap from being used. |
| `--mlock` | Force the system to keep the model in RAM. |
| `--n-gpu-layers N_GPU_LAYERS` | Number of layers to offload to the GPU. |
| `--tensor_split TENSOR_SPLIT` | Split the model across multiple GPUs. Comma-separated list of proportions. Example: 18,17. |
| `--numa` | Activate NUMA task allocation for llama.cpp. |
| `--logits_all` | Needs to be set for perplexity evaluation to work. Otherwise, ignore it, as it makes prompt processing slower. |
| `--no_offload_kqv` | Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance. |
| `--cache-capacity CACHE_CAPACITY` | Maximum cache capacity (llama-cpp-python). Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed. |

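For instance, to load a GGUF model with GPU offloading (the file name matches the example in the "Downloading models" section below; the layer count and context size are illustrative values):

```
python server.py --model llama-2-13b-chat.Q4_K_M.gguf --n-gpu-layers 40 --n_ctx 4096
```
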
#### ExLlamav2

| Flag | Description |
|------------------|-------------|
| `--gpu-split` | Comma-separated list of VRAM (in GB) to use per GPU device for model layers. Example: 20,7,7. |
| `--max_seq_len MAX_SEQ_LEN` | Maximum sequence length. |
| `--cfg-cache` | ExLlamav2_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader. |
| `--no_flash_attn` | Force flash-attention to not be used. |
| `--cache_8bit` | Use 8-bit cache to save VRAM. |
| `--num_experts_per_token NUM_EXPERTS_PER_TOKEN` | Number of experts to use for generation. Applies to MoE models like Mixtral. |

#### AutoGPTQ

| Flag | Description |
|------------------|-------------|
| `--triton` | Use triton. |
| `--no_inject_fused_attention` | Disable the use of fused attention, which will use less VRAM at the cost of slower inference. |
| `--no_inject_fused_mlp` | Triton mode only: disable the use of fused MLP, which will use less VRAM at the cost of slower inference. |
| `--no_use_cuda_fp16` | This can make models faster on some systems. |
| `--desc_act` | For models that don't have a quantize_config.json, this parameter is used to define whether to set desc_act or not in BaseQuantizeConfig. |
| `--disable_exllama` | Disable ExLlama kernel, which can improve inference speed on some systems. |
| `--disable_exllamav2` | Disable ExLlamav2 kernel. |

#### GPTQ-for-LLaMa

| Flag | Description |
|---------------------------|-------------|
| `--wbits WBITS` | Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported. |
| `--model_type MODEL_TYPE` | Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported. |
| `--groupsize GROUPSIZE` | Group size. |
| `--pre_layer PRE_LAYER [PRE_LAYER ...]` | The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. For multi-gpu, write the numbers separated by spaces, e.g. `--pre_layer 30 60`. |
| `--checkpoint CHECKPOINT` | The path to the quantized checkpoint file. If not specified, it will be automatically detected. |
| `--monkey-patch` | Apply the monkey patch for using LoRAs with quantized models. |

#### ctransformers

| Flag | Description |
|-------------|-------------|
| `--model_type MODEL_TYPE` | Model type of pre-quantized model. Currently gpt2, gptj, gptneox, falcon, llama, mpt, starcoder (gptbigcode), dollyv2, and replit are supported. |

#### HQQ

| Flag | Description |
|-------------|-------------|
| `--hqq-backend` | Backend for the HQQ loader. Valid options: PYTORCH, PYTORCH_COMPILE, ATEN. |

#### DeepSpeed

| Flag | Description |
|---------------------------------------|-------------|
| `--deepspeed` | Enable the use of DeepSpeed ZeRO-3 for inference via the Transformers integration. |
| `--nvme-offload-dir NVME_OFFLOAD_DIR` | DeepSpeed: Directory to use for ZeRO-3 NVME offloading. |
| `--local_rank LOCAL_RANK` | DeepSpeed: Optional argument for distributed setups. |

#### RoPE (for llama.cpp, ExLlamaV2, and transformers)

| Flag | Description |
|------------------|-------------|
| `--alpha_value ALPHA_VALUE` | Positional embeddings alpha factor for NTK RoPE scaling. Use either this or `compress_pos_emb`, not both. |
| `--rope_freq_base ROPE_FREQ_BASE` | If greater than 0, will be used instead of alpha_value. Those two are related by `rope_freq_base = 10000 * alpha_value ^ (64 / 63)`. |
| `--compress_pos_emb COMPRESS_POS_EMB` | Positional embeddings compression factor. Should be set to `(context length) / (model's original context length)`. Equal to `1/rope_freq_scale`. |

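A quick worked example of the two relationships in the table above (the alpha value and context lengths are arbitrary, chosen only for illustration):

```
# rope_freq_base from alpha_value, per the formula quoted in the table
alpha_value = 2.0
rope_freq_base = 10000 * alpha_value ** (64 / 63)
print(round(rope_freq_base))  # 20221

# compress_pos_emb for stretching a 4096-token model to an 8192-token context
compress_pos_emb = 8192 / 4096  # (context length) / (original context length) = 2.0
```
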
#### Gradio

| Flag | Description |
|---------------------------------------|-------------|
| `--listen` | Make the web UI reachable from your local network. |
| `--listen-port LISTEN_PORT` | The listening port that the server will use. |
| `--listen-host LISTEN_HOST` | The hostname that the server will use. |
| `--share` | Create a public URL. This is useful for running the web UI on Google Colab or similar. |
| `--auto-launch` | Open the web UI in the default browser upon launch. |
| `--gradio-auth USER:PWD` | Set Gradio authentication password in the format "username:password". Multiple credentials can also be supplied with "u1:p1,u2:p2,u3:p3". |
| `--gradio-auth-path GRADIO_AUTH_PATH` | Set the Gradio authentication file path. The file should contain one or more user:password pairs in the same format as above. |
| `--ssl-keyfile SSL_KEYFILE` | The path to the SSL certificate key file. |
| `--ssl-certfile SSL_CERTFILE` | The path to the SSL certificate cert file. |

#### API

| Flag | Description |
|---------------------------------------|-------------|
| `--api` | Enable the API extension. |
| `--public-api` | Create a public URL for the API using Cloudflare. |
| `--public-api-id PUBLIC_API_ID` | Tunnel ID for named Cloudflare Tunnel. Use together with the --public-api option. |
| `--api-port API_PORT` | The listening port for the API. |
| `--api-key API_KEY` | API authentication key. |
| `--admin-key ADMIN_KEY` | API authentication key for admin tasks like loading and unloading models. If not set, will be the same as --api-key. |
| `--nowebui` | Do not launch the Gradio UI. Useful for launching the API in standalone mode. |

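For instance, to run the API in standalone mode on a custom port with an authentication key (the port and key values are placeholders):

```
python server.py --api --nowebui --api-port 5001 --api-key YOUR_KEY
```
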
#### Multimodal

| Flag | Description |
|---------------------------------------|-------------|
| `--multimodal-pipeline PIPELINE` | The multimodal pipeline to use. Examples: `llava-7b`, `llava-13b`. |

</details>

## Documentation

https://github.com/oobabooga/text-generation-webui/wiki

## Downloading models

Models should be placed in the folder `text-generation-webui/models`. They are usually downloaded from [Hugging Face](https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads).

* GGUF models are a single file and should be placed directly into `models`. Example:

```
text-generation-webui
└── models
    └── llama-2-13b-chat.Q4_K_M.gguf
```

* The remaining model types (like 16-bit transformers models and GPTQ models) are made of several files and must be placed in a subfolder. Example:

```
text-generation-webui
├── models
│   ├── lmsys_vicuna-33b-v1.3
│   │   ├── config.json
│   │   ├── generation_config.json
│   │   ├── pytorch_model-00001-of-00007.bin
│   │   ├── pytorch_model-00002-of-00007.bin
│   │   ├── pytorch_model-00003-of-00007.bin
│   │   ├── pytorch_model-00004-of-00007.bin
│   │   ├── pytorch_model-00005-of-00007.bin
│   │   ├── pytorch_model-00006-of-00007.bin
│   │   ├── pytorch_model-00007-of-00007.bin
│   │   ├── pytorch_model.bin.index.json
│   │   ├── special_tokens_map.json
│   │   ├── tokenizer_config.json
│   │   └── tokenizer.model
```

In both cases, you can use the "Model" tab of the UI to download the model from Hugging Face automatically. It is also possible to download it via the command line with

```
python download-model.py organization/model
```

Run `python download-model.py --help` to see all the options.

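A few usage examples based on the options the script exposes (the repository and file names are illustrative):

```
# Download a non-default branch of a repository
python download-model.py organization/model --branch branch-name

# Download a single GGUF file (mirrors the folder example above)
python download-model.py organization/model --specific-file llama-2-13b-chat.Q4_K_M.gguf

# Verify the checksums of a previous download
python download-model.py organization/model --check
```
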
## Google Colab notebook

https://colab.research.google.com/github/oobabooga/text-generation-webui/blob/main/Colab-TextGen-GPU.ipynb

## Contributing

If you would like to contribute to the project, check out the [Contributing guidelines](https://github.com/oobabooga/text-generation-webui/wiki/Contributing-guidelines).

## Community

* Subreddit: https://www.reddit.com/r/oobabooga/
* Discord: https://discord.gg/jwZCF2dPQN

## Acknowledgment

In August 2023, [Andreessen Horowitz](https://a16z.com/) (a16z) provided a generous grant to encourage and support my independent work on this project. I am **extremely** grateful for their trust and recognition.

cmd_linux.sh
ADDED
@@ -0,0 +1,22 @@
#!/bin/bash

cd "$(dirname "${BASH_SOURCE[0]}")"

if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi

# deactivate existing conda envs as needed to avoid conflicts
{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null

# config
CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda"
INSTALL_ENV_DIR="$(pwd)/installer_files/env"

# environment isolation
export PYTHONNOUSERSITE=1
unset PYTHONPATH
unset PYTHONHOME
export CUDA_PATH="$INSTALL_ENV_DIR"
export CUDA_HOME="$CUDA_PATH"

# activate env
bash --init-file <(echo "source \"$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh\" && conda activate \"$INSTALL_ENV_DIR\"")
cmd_macos.sh
ADDED
@@ -0,0 +1,24 @@
#!/bin/bash

cd "$(dirname "${BASH_SOURCE[0]}")"

if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi

# deactivate existing conda envs as needed to avoid conflicts
{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null

# config
CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda"
INSTALL_ENV_DIR="$(pwd)/installer_files/env"

# environment isolation
export PYTHONNOUSERSITE=1
unset PYTHONPATH
unset PYTHONHOME
export CUDA_PATH="$INSTALL_ENV_DIR"
export CUDA_HOME="$CUDA_PATH"

# activate env (paths quoted for robustness, although spaces are rejected above)
source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh"
conda activate "$INSTALL_ENV_DIR"
exec bash --norc
cmd_windows.bat
ADDED
@@ -0,0 +1,34 @@
@echo off

cd /D "%~dp0"

set PATH=%PATH%;%SystemRoot%\system32

echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end

@rem fix failed install when installing to a separate drive
set TMP=%cd%\installer_files
set TEMP=%cd%\installer_files

@rem deactivate existing conda envs as needed to avoid conflicts
(call conda deactivate && call conda deactivate && call conda deactivate) 2>nul

@rem config
set CONDA_ROOT_PREFIX=%cd%\installer_files\conda
set INSTALL_ENV_DIR=%cd%\installer_files\env

@rem environment isolation
set PYTHONNOUSERSITE=1
set PYTHONPATH=
set PYTHONHOME=
set "CUDA_PATH=%INSTALL_ENV_DIR%"
set "CUDA_HOME=%CUDA_PATH%"

@rem activate installer env
call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end )

@rem enter commands
cmd /k "%*"

:end
pause
cmd_wsl.bat
ADDED
@@ -0,0 +1,11 @@
@echo off

cd /D "%~dp0"

set PATH=%PATH%;%SystemRoot%\system32

@rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script
call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh cmd"

:end
pause
convert-to-safetensors.py
ADDED
@@ -0,0 +1,38 @@
'''

Converts a transformers model to safetensors format and shards it.

This makes it faster to load (because of safetensors) and lowers its RAM usage
while loading (because of sharding).

Based on the original script by 81300:

https://gist.github.com/81300/fe5b08bff1cba45296a829b9d6b0f303

'''

import argparse
from pathlib import Path

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=54))
parser.add_argument('MODEL', type=str, default=None, nargs='?', help="Path to the input model.")
parser.add_argument('--output', type=str, default=None, help='Path to the output folder (default: models/{model_name}_safetensors).')
parser.add_argument("--max-shard-size", type=str, default="2GB", help="Maximum size of a shard in GB or MB (default: %(default)s).")
parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
args = parser.parse_args()

if __name__ == '__main__':
    path = Path(args.MODEL)
    model_name = path.name

    print(f"Loading {model_name}...")
    model = AutoModelForCausalLM.from_pretrained(path, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if args.bf16 else torch.float16)
    tokenizer = AutoTokenizer.from_pretrained(path)

    out_folder = args.output or Path(f"models/{model_name}_safetensors")
    print(f"Saving the converted model to {out_folder} with a maximum shard size of {args.max_shard_size}...")
    model.save_pretrained(out_folder, max_shard_size=args.max_shard_size, safe_serialization=True)
    tokenizer.save_pretrained(out_folder)
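Going by the argparse definitions above, a typical invocation of this script looks like the following (the model path is hypothetical):

```
python convert-to-safetensors.py models/my-model --output models/my-model_safetensors --max-shard-size 2GB
```
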
download-model.py
ADDED
@@ -0,0 +1,313 @@
'''
Downloads models from Hugging Face to models/username_modelname.

Example:
python download-model.py facebook/opt-1.3b

'''

import argparse
import base64
import datetime
import hashlib
import json
import os
import re
import sys
from pathlib import Path

import requests
import tqdm
from requests.adapters import HTTPAdapter
from tqdm.contrib.concurrent import thread_map

base = "https://huggingface.co"


class ModelDownloader:
    def __init__(self, max_retries=5):
        self.session = requests.Session()
        if max_retries:
            self.session.mount('https://cdn-lfs.huggingface.co', HTTPAdapter(max_retries=max_retries))
            self.session.mount('https://huggingface.co', HTTPAdapter(max_retries=max_retries))

        if os.getenv('HF_USER') is not None and os.getenv('HF_PASS') is not None:
            self.session.auth = (os.getenv('HF_USER'), os.getenv('HF_PASS'))

        try:
            from huggingface_hub import get_token
            token = get_token()
        except ImportError:
            token = os.getenv("HF_TOKEN")

        if token is not None:
            self.session.headers = {'authorization': f'Bearer {token}'}

    def sanitize_model_and_branch_names(self, model, branch):
        if model[-1] == '/':
            model = model[:-1]

        if model.startswith(base + '/'):
            model = model[len(base) + 1:]

        model_parts = model.split(":")
        model = model_parts[0] if len(model_parts) > 0 else model
        branch = model_parts[1] if len(model_parts) > 1 else branch

        if branch is None:
            branch = "main"
        else:
            pattern = re.compile(r"^[a-zA-Z0-9._-]+$")
            if not pattern.match(branch):
                raise ValueError(
                    "Invalid branch name. Only alphanumeric characters, period, underscore and dash are allowed.")

        return model, branch

    def get_download_links_from_huggingface(self, model, branch, text_only=False, specific_file=None):
        page = f"/api/models/{model}/tree/{branch}"
        cursor = b""

        links = []
        sha256 = []
        classifications = []
        has_pytorch = False
        has_pt = False
        has_gguf = False
        has_safetensors = False
        is_lora = False
        while True:
            url = f"{base}{page}" + (f"?cursor={cursor.decode()}" if cursor else "")
            r = self.session.get(url, timeout=10)
            r.raise_for_status()
            content = r.content

            dict = json.loads(content)
            if len(dict) == 0:
                break

            for i in range(len(dict)):
                fname = dict[i]['path']
                if specific_file not in [None, ''] and fname != specific_file:
                    continue

                if not is_lora and fname.endswith(('adapter_config.json', 'adapter_model.bin')):
                    is_lora = True

                is_pytorch = re.match(r"(pytorch|adapter|gptq)_model.*\.bin", fname)
                is_safetensors = re.match(r".*\.safetensors", fname)
                is_pt = re.match(r".*\.pt", fname)
                is_gguf = re.match(r'.*\.gguf', fname)
                is_tiktoken = re.match(r".*\.tiktoken", fname)
                is_tokenizer = re.match(r"(tokenizer|ice|spiece).*\.model", fname) or is_tiktoken
                is_text = re.match(r".*\.(txt|json|py|md)", fname) or is_tokenizer
                if any((is_pytorch, is_safetensors, is_pt, is_gguf, is_tokenizer, is_text)):
                    if 'lfs' in dict[i]:
                        sha256.append([fname, dict[i]['lfs']['oid']])

                    if is_text:
                        links.append(f"https://huggingface.co/{model}/resolve/{branch}/{fname}")
                        classifications.append('text')
                        continue

                    if not text_only:
                        links.append(f"https://huggingface.co/{model}/resolve/{branch}/{fname}")
                        if is_safetensors:
                            has_safetensors = True
                            classifications.append('safetensors')
                        elif is_pytorch:
                            has_pytorch = True
                            classifications.append('pytorch')
                        elif is_pt:
                            has_pt = True
                            classifications.append('pt')
                        elif is_gguf:
                            has_gguf = True
                            classifications.append('gguf')

            cursor = base64.b64encode(f'{{"file_name":"{dict[-1]["path"]}"}}'.encode()) + b':50'
            cursor = base64.b64encode(cursor)
            cursor = cursor.replace(b'=', b'%3D')

        # If both pytorch and safetensors are available, download safetensors only
        if (has_pytorch or has_pt) and has_safetensors:
            for i in range(len(classifications) - 1, -1, -1):
                if classifications[i] in ['pytorch', 'pt']:
                    links.pop(i)

        # For GGUF, try to download only the Q4_K_M if no specific file is specified.
        # If not present, exclude all GGUFs, as that's likely a repository with both
        # GGUF and fp16 files.
        if has_gguf and specific_file is None:
            has_q4km = False
            for i in range(len(classifications) - 1, -1, -1):
                if 'q4_k_m' in links[i].lower():
                    has_q4km = True

            if has_q4km:
                for i in range(len(classifications) - 1, -1, -1):
                    if 'q4_k_m' not in links[i].lower():
                        links.pop(i)
            else:
                for i in range(len(classifications) - 1, -1, -1):
                    if links[i].lower().endswith('.gguf'):
                        links.pop(i)

        is_llamacpp = has_gguf and specific_file is not None
        return links, sha256, is_lora, is_llamacpp

    def get_output_folder(self, model, branch, is_lora, is_llamacpp=False, base_folder=None):
        if base_folder is None:
            base_folder = 'models' if not is_lora else 'loras'

        # If the model is of type GGUF, save directly in the base_folder
        if is_llamacpp:
            return Path(base_folder)

        output_folder = f"{'_'.join(model.split('/')[-2:])}"
        if branch != 'main':
            output_folder += f'_{branch}'

        output_folder = Path(base_folder) / output_folder
        return output_folder

    def get_single_file(self, url, output_folder, start_from_scratch=False):
        filename = Path(url.rsplit('/', 1)[1])
        output_path = output_folder / filename
        headers = {}
        mode = 'wb'
        if output_path.exists() and not start_from_scratch:

            # Check if the file has already been downloaded completely
            r = self.session.get(url, stream=True, timeout=10)
            total_size = int(r.headers.get('content-length', 0))
            if output_path.stat().st_size >= total_size:
                return

            # Otherwise, resume the download from where it left off
            headers = {'Range': f'bytes={output_path.stat().st_size}-'}
            mode = 'ab'

        with self.session.get(url, stream=True, headers=headers, timeout=10) as r:
            r.raise_for_status()  # Do not continue the download if the request was unsuccessful
            total_size = int(r.headers.get('content-length', 0))
            block_size = 1024 * 1024  # 1MB

            tqdm_kwargs = {
                'total': total_size,
                'unit': 'iB',
                'unit_scale': True,
                'bar_format': '{l_bar}{bar}| {n_fmt:6}/{total_fmt:6} {rate_fmt:6}'
            }

            if 'COLAB_GPU' in os.environ:
                tqdm_kwargs.update({
                    'position': 0,
                    'leave': True
                })

            with open(output_path, mode) as f:
                with tqdm.tqdm(**tqdm_kwargs) as t:
                    count = 0
                    for data in r.iter_content(block_size):
                        t.update(len(data))
                        f.write(data)
                        if total_size != 0 and self.progress_bar is not None:
                            count += len(data)
                            self.progress_bar(float(count) / float(total_size), f"{filename}")

    def start_download_threads(self, file_list, output_folder, start_from_scratch=False, threads=4):
        thread_map(lambda url: self.get_single_file(url, output_folder, start_from_scratch=start_from_scratch), file_list, max_workers=threads, disable=True)

    def download_model_files(self, model, branch, links, sha256, output_folder, progress_bar=None, start_from_scratch=False, threads=4, specific_file=None, is_llamacpp=False):
        self.progress_bar = progress_bar

        # Create the folder and write the metadata
        output_folder.mkdir(parents=True, exist_ok=True)

        if not is_llamacpp:
            metadata = f'url: https://huggingface.co/{model}\n' \
                       f'branch: {branch}\n' \
                       f'download date: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n'

            sha256_str = '\n'.join([f'    {item[1]} {item[0]}' for item in sha256])
            if sha256_str:
                metadata += f'sha256sum:\n{sha256_str}'

            metadata += '\n'
            (output_folder / 'huggingface-metadata.txt').write_text(metadata)

        if specific_file:
            print(f"Downloading {specific_file} to {output_folder}")
        else:
            print(f"Downloading the model to {output_folder}")

        self.start_download_threads(links, output_folder, start_from_scratch=start_from_scratch, threads=threads)

    def check_model_files(self, model, branch, links, sha256, output_folder):
        # Validate the checksums
        validated = True
        for i in range(len(sha256)):
            fpath = (output_folder / sha256[i][0])

            if not fpath.exists():
                print(f"The following file is missing: {fpath}")
                validated = False
                continue

            with open(output_folder / sha256[i][0], "rb") as f:
                file_hash = hashlib.file_digest(f, "sha256").hexdigest()
                if file_hash != sha256[i][1]:
                    print(f'Checksum failed: {sha256[i][0]} {sha256[i][1]}')
                    validated = False
                else:
                    print(f'Checksum validated: {sha256[i][0]} {sha256[i][1]}')

        if validated:
            print('[+] Validated checksums of all model files!')
        else:
            print('[-] Invalid checksums. Rerun download-model.py with the --clean flag.')


if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('MODEL', type=str, default=None, nargs='?')
    parser.add_argument('--branch', type=str, default='main', help='Name of the Git branch to download from.')
    parser.add_argument('--threads', type=int, default=4, help='Number of files to download simultaneously.')
    parser.add_argument('--text-only', action='store_true', help='Only download text files (txt/json).')
    parser.add_argument('--specific-file', type=str, default=None, help='Name of the specific file to download (if not provided, downloads all).')
    parser.add_argument('--output', type=str, default=None, help='The folder where the model should be saved.')
    parser.add_argument('--clean', action='store_true', help='Does not resume the previous download.')
    parser.add_argument('--check', action='store_true', help='Validates the checksums of model files.')
    parser.add_argument('--max-retries', type=int, default=5, help='Maximum number of retries in case of a download error.')
    args = parser.parse_args()

    branch = args.branch
    model = args.MODEL
    specific_file = args.specific_file

    if model is None:
        print("Error: Please specify the model you'd like to download (e.g. 'python download-model.py facebook/opt-1.3b').")
        sys.exit()

    downloader = ModelDownloader(max_retries=args.max_retries)
    # Clean up the model/branch names
    try:
        model, branch = downloader.sanitize_model_and_branch_names(model, branch)
    except ValueError as err_branch:
        print(f"Error: {err_branch}")
        sys.exit()

    # Get the download links from Hugging Face
    links, sha256, is_lora, is_llamacpp = downloader.get_download_links_from_huggingface(model, branch, text_only=args.text_only, specific_file=specific_file)

    # Get the output folder
    output_folder = downloader.get_output_folder(model, branch, is_lora, is_llamacpp=is_llamacpp, base_folder=args.output)

    if args.check:
        # Check previously downloaded files
        downloader.check_model_files(model, branch, links, sha256, output_folder)
    else:
        # Download files
        downloader.download_model_files(model, branch, links, sha256, output_folder, specific_file=specific_file, threads=args.threads, is_llamacpp=is_llamacpp)
one_click.py
ADDED
@@ -0,0 +1,399 @@
import argparse
import glob
import hashlib
import os
import platform
import re
import signal
import site
import subprocess
import sys

script_dir = os.getcwd()
conda_env_path = os.path.join(script_dir, "installer_files", "env")

# Remove the '# ' from the following lines as needed for your AMD GPU on Linux
# os.environ["ROCM_PATH"] = '/opt/rocm'
# os.environ["HSA_OVERRIDE_GFX_VERSION"] = '10.3.0'
# os.environ["HCC_AMDGPU_TARGET"] = 'gfx1030'

# Command-line flags
cmd_flags_path = os.path.join(script_dir, "CMD_FLAGS.txt")
if os.path.exists(cmd_flags_path):
    with open(cmd_flags_path, 'r') as f:
        CMD_FLAGS = ' '.join(line.strip().rstrip('\\').strip() for line in f if line.strip().rstrip('\\').strip() and not line.strip().startswith('#'))
else:
    CMD_FLAGS = ''

flags = f"{' '.join([flag for flag in sys.argv[1:] if flag != '--update'])} {CMD_FLAGS}"


def signal_handler(sig, frame):
    sys.exit(0)


signal.signal(signal.SIGINT, signal_handler)


def is_linux():
    return sys.platform.startswith("linux")


def is_windows():
    return sys.platform.startswith("win")


def is_macos():
    return sys.platform.startswith("darwin")


def is_x86_64():
    return platform.machine() == "x86_64"


def cpu_has_avx2():
    try:
        import cpuinfo

        info = cpuinfo.get_cpu_info()
        if 'avx2' in info['flags']:
            return True
        else:
            return False
    except:
        return True


def cpu_has_amx():
    try:
        import cpuinfo

        info = cpuinfo.get_cpu_info()
        if 'amx' in info['flags']:
            return True
        else:
            return False
    except:
        return True


def torch_version():
    site_packages_path = None
    for sitedir in site.getsitepackages():
        if "site-packages" in sitedir and conda_env_path in sitedir:
            site_packages_path = sitedir
            break

    if site_packages_path:
        torch_version_file = open(os.path.join(site_packages_path, 'torch', 'version.py')).read().splitlines()
        torver = [line for line in torch_version_file if '__version__' in line][0].split('__version__ = ')[1].strip("'")
    else:
        from torch import __version__ as torver

    return torver


def is_installed():
    site_packages_path = None
    for sitedir in site.getsitepackages():
        if "site-packages" in sitedir and conda_env_path in sitedir:
            site_packages_path = sitedir
            break

    if site_packages_path:
        return os.path.isfile(os.path.join(site_packages_path, 'torch', '__init__.py'))
    else:
        return os.path.isdir(conda_env_path)


def check_env():
    # If we have access to conda, we are probably in an environment
    conda_exist = run_cmd("conda", environment=True, capture_output=True).returncode == 0
    if not conda_exist:
        print("Conda is not installed. Exiting...")
        sys.exit(1)

    # Ensure this is a new environment and not the base environment
    if os.environ["CONDA_DEFAULT_ENV"] == "base":
        print("Create an environment for this project and activate it. Exiting...")
        sys.exit(1)


def clear_cache():
    run_cmd("conda clean -a -y", environment=True)
    run_cmd("python -m pip cache purge", environment=True)


def print_big_message(message):
    message = message.strip()
    lines = message.split('\n')
    print("\n\n*******************************************************************")
    for line in lines:
        if line.strip() != '':
            print("*", line)

    print("*******************************************************************\n\n")


def calculate_file_hash(file_path):
    p = os.path.join(script_dir, file_path)
    if os.path.isfile(p):
        with open(p, 'rb') as f:
            return hashlib.sha256(f.read()).hexdigest()
    else:
        return ''


def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None):
    # Use the conda environment
    if environment:
        if is_windows():
            conda_bat_path = os.path.join(script_dir, "installer_files", "conda", "condabin", "conda.bat")
            cmd = f'"{conda_bat_path}" activate "{conda_env_path}" >nul && {cmd}'
        else:
            conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh")
            cmd = f'. "{conda_sh_path}" && conda activate "{conda_env_path}" && {cmd}'

    # Run shell commands
    result = subprocess.run(cmd, shell=True, capture_output=capture_output, env=env)

    # Assert the command ran successfully
    if assert_success and result.returncode != 0:
        print(f"Command '{cmd}' failed with exit status code '{str(result.returncode)}'.\n\nExiting now.\nTry running the start/update script again.")
        sys.exit(1)

    return result


def install_webui():
    # Select your GPU, or choose to run in CPU mode
    if "GPU_CHOICE" in os.environ:
        choice = os.environ["GPU_CHOICE"].upper()
        print_big_message(f"Selected GPU choice \"{choice}\" based on the GPU_CHOICE environment variable.")
    else:
        print()
        print("What is your GPU?")
        print()
        print("A) NVIDIA")
        print("B) AMD (Linux/MacOS only. Requires ROCm SDK 5.6 on Linux)")
        print("C) Apple M Series")
        print("D) Intel Arc (IPEX)")
        print("N) None (I want to run models in CPU mode)")
        print()

        choice = input("Input> ").upper()
        while choice not in 'ABCDN':
            print("Invalid choice. Please try again.")
            choice = input("Input> ").upper()

    gpu_choice_to_name = {
        "A": "NVIDIA",
        "B": "AMD",
        "C": "APPLE",
        "D": "INTEL",
        "N": "NONE"
    }

    selected_gpu = gpu_choice_to_name[choice]

    if selected_gpu == "NONE":
        with open(cmd_flags_path, 'r+') as cmd_flags_file:
            if "--cpu" not in cmd_flags_file.read():
                print_big_message("Adding the --cpu flag to CMD_FLAGS.txt.")
                cmd_flags_file.write("\n--cpu")

    # Find the proper Pytorch installation command
    install_git = "conda install -y -k ninja git"
    install_pytorch = "python -m pip install torch==2.1.* torchvision==0.16.* torchaudio==2.1.* "

    use_cuda118 = "N"
    if any((is_windows(), is_linux())) and selected_gpu == "NVIDIA":
        if "USE_CUDA118" in os.environ:
            use_cuda118 = "Y" if os.environ.get("USE_CUDA118", "").lower() in ("yes", "y", "true", "1", "t", "on") else "N"
        else:
            # Ask for CUDA version if using NVIDIA
            print("\nDo you want to use CUDA 11.8 instead of 12.1? Only choose this option if your GPU is very old (Kepler or older).\nFor RTX and GTX series GPUs, say \"N\". If unsure, say \"N\".\n")
            use_cuda118 = input("Input (Y/N)> ").upper().strip('"\'').strip()
            while use_cuda118 not in 'YN':
                print("Invalid choice. Please try again.")
                use_cuda118 = input("Input> ").upper().strip('"\'').strip()

        if use_cuda118 == 'Y':
            print("CUDA: 11.8")
            install_pytorch += "--index-url https://download.pytorch.org/whl/cu118"
        else:
            print("CUDA: 12.1")
            install_pytorch += "--index-url https://download.pytorch.org/whl/cu121"
    elif not is_macos() and selected_gpu == "AMD":
        if is_linux():
            install_pytorch += "--index-url https://download.pytorch.org/whl/rocm5.6"
        else:
            print("AMD GPUs are only supported on Linux. Exiting...")
            sys.exit(1)
    elif is_linux() and selected_gpu in ["APPLE", "NONE"]:
        install_pytorch += "--index-url https://download.pytorch.org/whl/cpu"
    elif selected_gpu == "INTEL":
        install_pytorch = "python -m pip install torch==2.1.0a0 torchvision==0.16.0a0 torchaudio==2.1.0a0 intel-extension-for-pytorch==2.1.10 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"

    # Install Git and then Pytorch
    print_big_message("Installing PyTorch.")
    run_cmd(f"{install_git} && {install_pytorch} && python -m pip install py-cpuinfo==9.0.0", assert_success=True, environment=True)

    # Install CUDA libraries (this wasn't necessary for Pytorch before...)
    if selected_gpu == "NVIDIA":
        print_big_message("Installing the CUDA runtime libraries.")
        run_cmd(f"conda install -y -c \"nvidia/label/{'cuda-12.1.1' if use_cuda118 == 'N' else 'cuda-11.8.0'}\" cuda-runtime", assert_success=True, environment=True)

    if selected_gpu == "INTEL":
        # Install oneAPI dependencies via conda
        print_big_message("Installing Intel oneAPI runtime libraries.")
        run_cmd("conda install -y -c intel dpcpp-cpp-rt=2024.0 mkl-dpcpp=2024.0")
        # Install libuv required by Intel-patched torch
        run_cmd("conda install -y libuv")

    # Install the webui requirements
    update_requirements(initial_installation=True)


def update_requirements(initial_installation=False):
    # Create .git directory if missing
    if not os.path.exists(os.path.join(script_dir, ".git")):
        git_creation_cmd = 'git init -b main && git remote add origin https://github.com/oobabooga/text-generation-webui && git fetch && git symbolic-ref refs/remotes/origin/HEAD refs/remotes/origin/main && git reset --hard origin/main && git branch --set-upstream-to=origin/main'
        run_cmd(git_creation_cmd, environment=True, assert_success=True)

    files_to_check = [
        'start_linux.sh', 'start_macos.sh', 'start_windows.bat', 'start_wsl.bat',
        'update_linux.sh', 'update_macos.sh', 'update_windows.bat', 'update_wsl.bat',
        'one_click.py'
    ]

    before_pull_hashes = {file_name: calculate_file_hash(file_name) for file_name in files_to_check}
    run_cmd("git pull --autostash", assert_success=True, environment=True)
    after_pull_hashes = {file_name: calculate_file_hash(file_name) for file_name in files_to_check}

    # Check for differences in installation file hashes
    for file_name in files_to_check:
        if before_pull_hashes[file_name] != after_pull_hashes[file_name]:
            print_big_message(f"File '{file_name}' was updated during 'git pull'. Please run the script again.")
            exit(1)

    # Extensions requirements are installed only during the initial install by default.
    # That can be changed with the INSTALL_EXTENSIONS environment variable.
    install = initial_installation
    if "INSTALL_EXTENSIONS" in os.environ:
        install = os.environ["INSTALL_EXTENSIONS"].lower() in ("yes", "y", "true", "1", "t", "on")

    if install:
        print_big_message("Installing extensions requirements.")
        skip = ['superbooga', 'superboogav2', 'coqui_tts']  # Fail to install on Windows
        extensions = [foldername for foldername in os.listdir('extensions') if os.path.isfile(os.path.join('extensions', foldername, 'requirements.txt'))]
        extensions = [x for x in extensions if x not in skip]
        for i, extension in enumerate(extensions):
            print(f"\n\n--- [{i+1}/{len(extensions)}]: {extension}\n\n")
            extension_req_path = os.path.join("extensions", extension, "requirements.txt")
            run_cmd(f"python -m pip install -r {extension_req_path} --upgrade", assert_success=False, environment=True)
    elif initial_installation:
        print_big_message("Will not install extensions due to INSTALL_EXTENSIONS environment variable.")

    # Detect the Python and PyTorch versions
    torver = torch_version()
    is_cuda = '+cu' in torver
    is_cuda118 = '+cu118' in torver  # 2.1.0+cu118
    is_rocm = '+rocm' in torver  # 2.0.1+rocm5.4.2
    is_intel = '+cxx11' in torver  # 2.0.1a0+cxx11.abi
    is_cpu = '+cpu' in torver  # 2.0.1+cpu

    if is_rocm:
        base_requirements = "requirements_amd" + ("_noavx2" if not cpu_has_avx2() else "") + ".txt"
    elif is_cpu or is_intel:
        base_requirements = "requirements_cpu_only" + ("_noavx2" if not cpu_has_avx2() else "") + ".txt"
    elif is_macos():
        base_requirements = "requirements_apple_" + ("intel" if is_x86_64() else "silicon") + ".txt"
    else:
        base_requirements = "requirements" + ("_noavx2" if not cpu_has_avx2() else "") + ".txt"

    requirements_file = base_requirements

    print_big_message(f"Installing webui requirements from file: {requirements_file}")
    print(f"TORCH: {torver}\n")

    # Prepare the requirements file
    textgen_requirements = open(requirements_file).read().splitlines()
    if is_cuda118:
        textgen_requirements = [req.replace('+cu121', '+cu118').replace('+cu122', '+cu118') for req in textgen_requirements]
    if is_windows() and is_cuda118:  # No flash-attention on Windows for CUDA 11
        textgen_requirements = [req for req in textgen_requirements if 'jllllll/flash-attention' not in req]

    with open('temp_requirements.txt', 'w') as file:
        file.write('\n'.join(textgen_requirements))

    # Workaround for git+ packages not updating properly.
    git_requirements = [req for req in textgen_requirements if req.startswith("git+")]
    for req in git_requirements:
        url = req.replace("git+", "")
        package_name = url.split("/")[-1].split("@")[0].rstrip(".git")
        run_cmd(f"python -m pip uninstall -y {package_name}", environment=True)
        print(f"Uninstalled {package_name}")

    # Make sure that API requirements are installed (temporary)
    extension_req_path = os.path.join("extensions", "openai", "requirements.txt")
    if os.path.exists(extension_req_path):
        run_cmd(f"python -m pip install -r {extension_req_path} --upgrade", environment=True)

    # Install/update the project requirements
    run_cmd("python -m pip install -r temp_requirements.txt --upgrade", assert_success=True, environment=True)
    os.remove('temp_requirements.txt')

    # Check for '+cu' or '+rocm' in version string to determine if torch uses CUDA or ROCm. Check for pytorch-cuda as well for backwards compatibility
    if not any((is_cuda, is_rocm)) and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1:
        clear_cache()
        return

    if not os.path.exists("repositories/"):
        os.mkdir("repositories")

    clear_cache()


def launch_webui():
    run_cmd(f"python server.py {flags}", environment=True)


if __name__ == "__main__":
    # Verifies we are in a conda environment
    check_env()

    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--update', action='store_true', help='Update the web UI.')
|
368 |
+
args, _ = parser.parse_known_args()
|
369 |
+
|
370 |
+
if args.update:
|
371 |
+
update_requirements()
|
372 |
+
else:
|
373 |
+
# If webui has already been installed, skip and run
|
374 |
+
if not is_installed():
|
375 |
+
install_webui()
|
376 |
+
os.chdir(script_dir)
|
377 |
+
|
378 |
+
if os.environ.get("LAUNCH_AFTER_INSTALL", "").lower() in ("no", "n", "false", "0", "f", "off"):
|
379 |
+
print_big_message("Install finished successfully and will now exit due to LAUNCH_AFTER_INSTALL.")
|
380 |
+
sys.exit()
|
381 |
+
|
382 |
+
# Check if a model has been downloaded yet
|
383 |
+
if '--model-dir' in flags:
|
384 |
+
# Splits on ' ' or '=' while maintaining spaces within quotes
|
385 |
+
flags_list = re.split(' +(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)|=', flags)
|
386 |
+
model_dir = [flags_list[(flags_list.index(flag) + 1)] for flag in flags_list if flag == '--model-dir'][0].strip('"\'')
|
387 |
+
else:
|
388 |
+
model_dir = 'models'
|
389 |
+
|
390 |
+
if len([item for item in glob.glob(f'{model_dir}/*') if not item.endswith(('.txt', '.yaml'))]) == 0:
|
391 |
+
print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the \"Model\" tab and download one.")
|
392 |
+
|
393 |
+
# Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist
|
394 |
+
conda_path_bin = os.path.join(conda_env_path, "bin")
|
395 |
+
if not os.path.exists(conda_path_bin):
|
396 |
+
os.mkdir(conda_path_bin)
|
397 |
+
|
398 |
+
# Launch the webui
|
399 |
+
launch_webui()
|
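A note on the --model-dir handling above: the re.split pattern only splits on runs of spaces when an even number of double quotes lies ahead, so quoted paths that contain spaces survive as single tokens. A minimal standalone sketch (the flag values here are made up for illustration):

import re

flags = '--api --model-dir "D:/my models" --verbose'

# Split on ' ' or '=' while maintaining spaces within quotes, as in one_click.py
flags_list = re.split(' +(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)|=', flags)
print(flags_list)  # ['--api', '--model-dir', '"D:/my models"', '--verbose']

# The value following '--model-dir' is then unquoted with .strip('"\'')
print(flags_list[flags_list.index('--model-dir') + 1].strip('"\''))  # D:/my models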
requirements.txt
ADDED
@@ -0,0 +1,67 @@
+accelerate==0.25.*
+colorama
+datasets
+einops
+exllamav2==0.0.12; platform_system != "Darwin" and platform_machine != "x86_64"
+gradio==3.50.*
+hqq==0.1.2.post1
+jinja2==3.1.2
+lm_eval==0.3.0
+markdown
+numpy==1.24.*
+optimum==1.16.*
+pandas
+peft==0.7.*
+Pillow>=9.5.0
+pyyaml
+requests
+rich
+safetensors==0.4.*
+scipy
+sentencepiece
+tensorboard
+transformers==4.37.*
+tqdm
+wandb
+
+# bitsandbytes
+bitsandbytes==0.41.1; platform_system != "Windows"
+https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.1-py3-none-win_amd64.whl; platform_system == "Windows"
+
+# llama-cpp-python (CPU only, AVX2)
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx2-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx2-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+
+# llama-cpp-python (CUDA, no tensor cores)
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.38+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.38+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.38+cu121-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.38+cu121-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+
+# llama-cpp-python (CUDA, tensor cores)
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.38+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.38+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.38+cu121-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.38+cu121-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+
+# CUDA wheels
+https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/jllllll/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu121torch2.1cxx11abiFALSE-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/jllllll/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu121torch2.1cxx11abiFALSE-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu122torch2.1cxx11abiFALSE-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu122torch2.1cxx11abiFALSE-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/jllllll/ctransformers-cuBLAS-wheels/releases/download/AVX2/ctransformers-0.2.27+cu121-py3-none-any.whl
+autoawq==0.1.8; platform_system == "Linux" or platform_system == "Windows"
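The trailing '; platform_system == ...' clauses in the file above are PEP 508 environment markers: pip evaluates each marker against the running interpreter and skips any requirement whose marker is false, which is how one file can carry wheel URLs for several operating systems and Python versions at once. A small sketch of the same evaluation using the packaging library (used here purely for illustration; pip performs this check internally):

from packaging.markers import Marker

marker = Marker('platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"')

# True only on 64-bit Linux under CPython 3.11; on any other platform pip
# would skip the wheel URL guarded by this marker.
print(marker.evaluate())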
requirements_amd.txt
ADDED
@@ -0,0 +1,45 @@
+accelerate==0.25.*
+colorama
+datasets
+einops
+exllamav2==0.0.12; platform_system == "Windows" or python_version < "3.10" or python_version > "3.11" or platform_machine != "x86_64"
+gradio==3.50.*
+hqq==0.1.2.post1
+jinja2==3.1.2
+lm_eval==0.3.0
+markdown
+numpy==1.24.*
+optimum==1.16.*
+pandas
+peft==0.7.*
+Pillow>=9.5.0
+pyyaml
+requests
+rich
+safetensors==0.4.*
+scipy
+sentencepiece
+tensorboard
+transformers==4.37.*
+tqdm
+wandb
+
+# bitsandbytes
+bitsandbytes==0.38.1; platform_system != "Windows"
+https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.38.1-py3-none-win_amd64.whl; platform_system == "Windows"
+
+# llama-cpp-python (CPU only, AVX2)
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx2-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx2-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+
+# AMD wheels
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.38+rocm5.6.1-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.38+rocm5.6.1-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
requirements_amd_noavx2.txt
ADDED
@@ -0,0 +1,43 @@
+accelerate==0.25.*
+colorama
+datasets
+einops
+exllamav2==0.0.12; platform_system == "Windows" or python_version < "3.10" or python_version > "3.11" or platform_machine != "x86_64"
+gradio==3.50.*
+hqq==0.1.2.post1
+jinja2==3.1.2
+lm_eval==0.3.0
+markdown
+numpy==1.24.*
+optimum==1.16.*
+pandas
+peft==0.7.*
+Pillow>=9.5.0
+pyyaml
+requests
+rich
+safetensors==0.4.*
+scipy
+sentencepiece
+tensorboard
+transformers==4.37.*
+tqdm
+wandb
+
+# bitsandbytes
+bitsandbytes==0.38.1; platform_system != "Windows"
+https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.38.1-py3-none-win_amd64.whl; platform_system == "Windows"
+
+# llama-cpp-python (CPU only, no AVX2)
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+
+# AMD wheels
+https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
requirements_apple_intel.txt
ADDED
@@ -0,0 +1,37 @@
+accelerate==0.25.*
+colorama
+datasets
+einops
+exllamav2==0.0.12
+gradio==3.50.*
+hqq==0.1.2.post1
+jinja2==3.1.2
+lm_eval==0.3.0
+markdown
+numpy==1.24.*
+optimum==1.16.*
+pandas
+peft==0.7.*
+Pillow>=9.5.0
+pyyaml
+requests
+rich
+safetensors==0.4.*
+scipy
+sentencepiece
+tensorboard
+transformers==4.37.*
+tqdm
+wandb
+
+# bitsandbytes
+bitsandbytes==0.41.1; platform_system != "Windows"
+https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.1-py3-none-win_amd64.whl; platform_system == "Windows"
+
+# Mac wheels
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp311-cp311-macosx_11_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp310-cp310-macosx_11_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp311-cp311-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp310-cp310-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp311-cp311-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp310-cp310-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10"
requirements_apple_silicon.txt
ADDED
@@ -0,0 +1,39 @@
+accelerate==0.25.*
+colorama
+datasets
+einops
+exllamav2==0.0.12
+gradio==3.50.*
+hqq==0.1.2.post1
+jinja2==3.1.2
+lm_eval==0.3.0
+markdown
+numpy==1.24.*
+optimum==1.16.*
+pandas
+peft==0.7.*
+Pillow>=9.5.0
+pyyaml
+requests
+rich
+safetensors==0.4.*
+scipy
+sentencepiece
+tensorboard
+transformers==4.37.*
+tqdm
+wandb
+
+# bitsandbytes
+bitsandbytes==0.41.1; platform_system != "Windows"
+https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.1-py3-none-win_amd64.whl; platform_system == "Windows"
+
+# Mac wheels
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp311-cp311-macosx_11_0_arm64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp310-cp310-macosx_11_0_arm64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp311-cp311-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp310-cp310-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp311-cp311-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp310-cp310-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp311-cp311-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.38-cp310-cp310-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10"
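The platform_release bounds in the two Apple files above select a wheel by Darwin kernel version, which tracks the macOS release: Darwin 20.x is macOS 11 (Big Sur), 21.x is macOS 12, 22.x is macOS 13, and 23.x is macOS 14. A hypothetical helper showing which bracket the current machine falls into:

import platform

# Darwin kernel major version -> macOS release, mirroring the wheel markers
DARWIN_TO_MACOS = {20: 11, 21: 12, 22: 13, 23: 14}

release = platform.release()  # e.g. '23.2.0' on macOS 14
major = int(release.split('.')[0])
print(f"Darwin {release} -> macOS {DARWIN_TO_MACOS.get(major, 'unknown')}")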
requirements_cpu_only.txt
ADDED
@@ -0,0 +1,35 @@
+accelerate==0.25.*
+colorama
+datasets
+einops
+exllamav2==0.0.12
+gradio==3.50.*
+hqq==0.1.2.post1
+jinja2==3.1.2
+lm_eval==0.3.0
+markdown
+numpy==1.24.*
+optimum==1.16.*
+pandas
+peft==0.7.*
+Pillow>=9.5.0
+pyyaml
+requests
+rich
+safetensors==0.4.*
+scipy
+sentencepiece
+tensorboard
+transformers==4.37.*
+tqdm
+wandb
+
+# bitsandbytes
+bitsandbytes==0.41.1; platform_system != "Windows"
+https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.1-py3-none-win_amd64.whl; platform_system == "Windows"
+
+# llama-cpp-python (CPU only, AVX2)
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx2-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx2-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
requirements_cpu_only_noavx2.txt
ADDED
@@ -0,0 +1,35 @@
+accelerate==0.25.*
+colorama
+datasets
+einops
+exllamav2==0.0.12
+gradio==3.50.*
+hqq==0.1.2.post1
+jinja2==3.1.2
+lm_eval==0.3.0
+markdown
+numpy==1.24.*
+optimum==1.16.*
+pandas
+peft==0.7.*
+Pillow>=9.5.0
+pyyaml
+requests
+rich
+safetensors==0.4.*
+scipy
+sentencepiece
+tensorboard
+transformers==4.37.*
+tqdm
+wandb
+
+# bitsandbytes
+bitsandbytes==0.41.1; platform_system != "Windows"
+https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.1-py3-none-win_amd64.whl; platform_system == "Windows"
+
+# llama-cpp-python (CPU only, no AVX2)
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
requirements_noavx2.txt
ADDED
@@ -0,0 +1,67 @@
+accelerate==0.25.*
+colorama
+datasets
+einops
+exllamav2==0.0.12; platform_system != "Darwin" and platform_machine != "x86_64"
+gradio==3.50.*
+hqq==0.1.2.post1
+jinja2==3.1.2
+lm_eval==0.3.0
+markdown
+numpy==1.24.*
+optimum==1.16.*
+pandas
+peft==0.7.*
+Pillow>=9.5.0
+pyyaml
+requests
+rich
+safetensors==0.4.*
+scipy
+sentencepiece
+tensorboard
+transformers==4.37.*
+tqdm
+wandb
+
+# bitsandbytes
+bitsandbytes==0.41.1; platform_system != "Windows"
+https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.1-py3-none-win_amd64.whl; platform_system == "Windows"
+
+# llama-cpp-python (CPU only, no AVX2)
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.38+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+
+# llama-cpp-python (CUDA, no tensor cores)
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.38+cu121avx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.38+cu121avx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.38+cu121avx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.38+cu121avx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+
+# llama-cpp-python (CUDA, tensor cores)
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.38+cu121avx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.38+cu121avx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.38+cu121avx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.38+cu121avx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+
+# CUDA wheels
+https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/jllllll/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu121torch2.1cxx11abiFALSE-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/jllllll/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu121torch2.1cxx11abiFALSE-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu122torch2.1cxx11abiFALSE-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu122torch2.1cxx11abiFALSE-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/jllllll/ctransformers-cuBLAS-wheels/releases/download/AVX/ctransformers-0.2.27+cu121-py3-none-any.whl
+autoawq==0.1.8; platform_system == "Linux" or platform_system == "Windows"
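one_click.py chooses requirements.txt or requirements_noavx2.txt depending on whether the CPU supports AVX2, which is why it installs py-cpuinfo during setup. A sketch of such a check (the real cpu_has_avx2() helper is defined earlier in one_click.py and may differ in details):

import cpuinfo  # provided by the py-cpuinfo package installed by one_click.py

def cpu_has_avx2() -> bool:
    # get_cpu_info() returns a dict whose 'flags' list holds lowercase
    # CPU feature names such as 'sse4_2', 'avx' and 'avx2'.
    info = cpuinfo.get_cpu_info()
    return 'avx2' in info.get('flags', [])

print(cpu_has_avx2())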
requirements_nowheels.txt
ADDED
@@ -0,0 +1,29 @@
+accelerate==0.25.*
+colorama
+datasets
+einops
+exllamav2==0.0.12
+gradio==3.50.*
+hqq==0.1.2.post1
+jinja2==3.1.2
+lm_eval==0.3.0
+markdown
+numpy==1.24.*
+optimum==1.16.*
+pandas
+peft==0.7.*
+Pillow>=9.5.0
+pyyaml
+requests
+rich
+safetensors==0.4.*
+scipy
+sentencepiece
+tensorboard
+transformers==4.37.*
+tqdm
+wandb
+
+# bitsandbytes
+bitsandbytes==0.41.1; platform_system != "Windows"
+https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.1-py3-none-win_amd64.whl; platform_system == "Windows"
server.py
ADDED
@@ -0,0 +1,262 @@
+import os
+import warnings
+
+from modules import shared
+
+import accelerate  # This early import makes Intel GPUs happy
+
+import modules.one_click_installer_check
+from modules.block_requests import OpenMonkeyPatch, RequestBlocker
+from modules.logging_colors import logger
+
+os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
+os.environ['BITSANDBYTES_NOWELCOME'] = '1'
+warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated')
+warnings.filterwarnings('ignore', category=UserWarning, message='Using the update method is deprecated')
+warnings.filterwarnings('ignore', category=UserWarning, message='Field "model_name" has conflict')
+warnings.filterwarnings('ignore', category=UserWarning, message='The value passed into gr.Dropdown()')
+warnings.filterwarnings('ignore', category=UserWarning, message='Field "model_names" has conflict')
+
+with RequestBlocker():
+    import gradio as gr
+
+import matplotlib
+
+matplotlib.use('Agg')  # This fixes LaTeX rendering on some systems
+
+import json
+import os
+import signal
+import sys
+import time
+from functools import partial
+from pathlib import Path
+from threading import Lock
+
+import yaml
+
+import modules.extensions as extensions_module
+from modules import (
+    chat,
+    training,
+    ui,
+    ui_chat,
+    ui_default,
+    ui_file_saving,
+    ui_model_menu,
+    ui_notebook,
+    ui_parameters,
+    ui_session,
+    utils
+)
+from modules.extensions import apply_extensions
+from modules.LoRA import add_lora_to_model
+from modules.models import load_model
+from modules.models_settings import (
+    get_fallback_settings,
+    get_model_metadata,
+    update_model_parameters
+)
+from modules.shared import do_cmd_flags_warnings
+from modules.utils import gradio
+
+
+def signal_handler(sig, frame):
+    logger.info("Received Ctrl+C. Shutting down Text generation web UI gracefully.")
+    sys.exit(0)
+
+
+signal.signal(signal.SIGINT, signal_handler)
+
+
+def create_interface():
+
+    title = 'Text generation web UI'
+
+    # Password authentication
+    auth = []
+    if shared.args.gradio_auth:
+        auth.extend(x.strip() for x in shared.args.gradio_auth.strip('"').replace('\n', '').split(',') if x.strip())
+    if shared.args.gradio_auth_path:
+        with open(shared.args.gradio_auth_path, 'r', encoding="utf8") as file:
+            auth.extend(x.strip() for line in file for x in line.split(',') if x.strip())
+    auth = [tuple(cred.split(':')) for cred in auth]
+
+    # Import the extensions and execute their setup() functions
+    if shared.args.extensions is not None and len(shared.args.extensions) > 0:
+        extensions_module.load_extensions()
+
+    # Force some events to be triggered on page load
+    shared.persistent_interface_state.update({
+        'loader': shared.args.loader or 'Transformers',
+        'mode': shared.settings['mode'],
+        'character_menu': shared.args.character or shared.settings['character'],
+        'instruction_template_str': shared.settings['instruction_template_str'],
+        'prompt_menu-default': shared.settings['prompt-default'],
+        'prompt_menu-notebook': shared.settings['prompt-notebook'],
+        'filter_by_loader': shared.args.loader or 'All'
+    })
+
+    if Path("cache/pfp_character.png").exists():
+        Path("cache/pfp_character.png").unlink()
+
+    # css/js strings
+    css = ui.css
+    js = ui.js
+    css += apply_extensions('css')
+    js += apply_extensions('js')
+
+    # Interface state elements
+    shared.input_elements = ui.list_interface_input_elements()
+
+    with gr.Blocks(css=css, analytics_enabled=False, title=title, theme=ui.theme) as shared.gradio['interface']:
+
+        # Interface state
+        shared.gradio['interface_state'] = gr.State({k: None for k in shared.input_elements})
+
+        # Audio notification
+        if Path("notification.mp3").exists():
+            shared.gradio['audio_notification'] = gr.Audio(interactive=False, value="notification.mp3", elem_id="audio_notification", visible=False)
+
+        # Floating menus for saving/deleting files
+        ui_file_saving.create_ui()
+
+        # Temporary clipboard for saving files
+        shared.gradio['temporary_text'] = gr.Textbox(visible=False)
+
+        # Text Generation tab
+        ui_chat.create_ui()
+        ui_default.create_ui()
+        ui_notebook.create_ui()
+
+        ui_parameters.create_ui(shared.settings['preset'])  # Parameters tab
+        ui_model_menu.create_ui()  # Model tab
+        training.create_ui()  # Training tab
+        ui_session.create_ui()  # Session tab
+
+        # Generation events
+        ui_chat.create_event_handlers()
+        ui_default.create_event_handlers()
+        ui_notebook.create_event_handlers()
+
+        # Other events
+        ui_file_saving.create_event_handlers()
+        ui_parameters.create_event_handlers()
+        ui_model_menu.create_event_handlers()
+
+        # Interface launch events
+        if shared.settings['dark_theme']:
+            shared.gradio['interface'].load(lambda: None, None, None, _js="() => document.getElementsByTagName('body')[0].classList.add('dark')")
+
+        shared.gradio['interface'].load(lambda: None, None, None, _js=f"() => {{{js}}}")
+        shared.gradio['interface'].load(None, gradio('show_controls'), None, _js=f'(x) => {{{ui.show_controls_js}; toggle_controls(x)}}')
+        shared.gradio['interface'].load(partial(ui.apply_interface_values, {}, use_persistent=True), None, gradio(ui.list_interface_input_elements()), show_progress=False)
+        shared.gradio['interface'].load(chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))
+
+        extensions_module.create_extensions_tabs()  # Extensions tabs
+        extensions_module.create_extensions_block()  # Extensions block
+
+    # Launch the interface
+    shared.gradio['interface'].queue(concurrency_count=64)
+    with OpenMonkeyPatch():
+        shared.gradio['interface'].launch(
+            prevent_thread_lock=True,
+            share=True,
+            server_name=None if not shared.args.listen else (shared.args.listen_host or '0.0.0.0'),
+            server_port=shared.args.listen_port,
+            inbrowser=shared.args.auto_launch,
+            auth=auth or None,
+            ssl_verify=False if (shared.args.ssl_keyfile or shared.args.ssl_certfile) else True,
+            ssl_keyfile=shared.args.ssl_keyfile,
+            ssl_certfile=shared.args.ssl_certfile
+        )
+
+
+if __name__ == "__main__":
+
+    logger.info("Starting Text generation web UI")
+    do_cmd_flags_warnings()
+
+    # Load custom settings
+    settings_file = None
+    if shared.args.settings is not None and Path(shared.args.settings).exists():
+        settings_file = Path(shared.args.settings)
+    elif Path('settings.yaml').exists():
+        settings_file = Path('settings.yaml')
+    elif Path('settings.json').exists():
+        settings_file = Path('settings.json')
+
+    if settings_file is not None:
+        logger.info(f"Loading settings from {settings_file}")
+        file_contents = open(settings_file, 'r', encoding='utf-8').read()
+        new_settings = json.loads(file_contents) if settings_file.suffix == ".json" else yaml.safe_load(file_contents)  # Path.suffix includes the leading dot
+        shared.settings.update(new_settings)
+
+    # Fallback settings for models
+    shared.model_config['.*'] = get_fallback_settings()
+    shared.model_config.move_to_end('.*', last=False)  # Move to the beginning
+
+    # Activate the extensions listed on settings.yaml
+    extensions_module.available_extensions = utils.get_available_extensions()
+    for extension in shared.settings['default_extensions']:
+        shared.args.extensions = shared.args.extensions or []
+        if extension not in shared.args.extensions:
+            shared.args.extensions.append(extension)
+
+    available_models = utils.get_available_models()
+
+    # Model defined through --model
+    if shared.args.model is not None:
+        shared.model_name = shared.args.model
+
+    # Select the model from a command-line menu
+    elif shared.args.model_menu:
+        if len(available_models) == 0:
+            logger.error('No models are available! Please download at least one.')
+            sys.exit(0)
+        else:
+            print('The following models are available:\n')
+            for i, model in enumerate(available_models):
+                print(f'{i+1}. {model}')
+
+            print(f'\nWhich one do you want to load? 1-{len(available_models)}\n')
+            i = int(input()) - 1
+            print()
+
+        shared.model_name = available_models[i]
+
+    # If any model has been selected, load it
+    if shared.model_name != 'None':
+        p = Path(shared.model_name)
+        if p.exists():
+            model_name = p.parts[-1]
+            shared.model_name = model_name
+        else:
+            model_name = shared.model_name
+
+        model_settings = get_model_metadata(model_name)
+        update_model_parameters(model_settings, initial=True)  # hijack the command-line arguments
+
+        # Load the model
+        shared.model, shared.tokenizer = load_model(model_name)
+        if shared.args.lora:
+            add_lora_to_model(shared.args.lora)
+
+    shared.generation_lock = Lock()
+
+    if shared.args.nowebui:
+        # Start the API in standalone mode
+        shared.args.extensions = [x for x in shared.args.extensions if x != 'gallery']
+        if shared.args.extensions is not None and len(shared.args.extensions) > 0:
+            extensions_module.load_extensions()
+    else:
+        # Launch the web UI
+        create_interface()
+        while True:
+            time.sleep(0.5)
+            if shared.need_restart:
+                shared.need_restart = False
+                time.sleep(0.5)
+                shared.gradio['interface'].close()
+                time.sleep(0.5)
+                create_interface()
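For reference, create_interface() above accepts credentials either as a single 'user1:pass1,user2:pass2' string (--gradio-auth) or as such comma-separated lists, one per line, in a file (--gradio-auth-path), and reduces both to (user, password) tuples. A standalone sketch of that parsing with made-up credentials:

# Same list comprehensions as in create_interface(), shown outside the webui
raw = '"alice:secret1, bob:secret2"'

auth = [x.strip() for x in raw.strip('"').replace('\n', '').split(',') if x.strip()]
auth = [tuple(cred.split(':')) for cred in auth]
print(auth)  # [('alice', 'secret1'), ('bob', 'secret2')]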
settings-template.yaml
ADDED
@@ -0,0 +1,74 @@
+dark_theme: true
+show_controls: true
+start_with: ''
+mode: chat
+chat_style: cai-chat
+prompt-default: QA
+prompt-notebook: QA
+preset: simple-1
+max_new_tokens: 512
+max_new_tokens_min: 1
+max_new_tokens_max: 4096
+negative_prompt: ''
+seed: -1
+truncation_length: 2048
+truncation_length_min: 0
+truncation_length_max: 200000
+max_tokens_second: 0
+max_updates_second: 0
+prompt_lookup_num_tokens: 0
+custom_stopping_strings: ''
+custom_token_bans: ''
+auto_max_new_tokens: false
+ban_eos_token: false
+add_bos_token: true
+skip_special_tokens: true
+stream: true
+character: Assistant
+name1: You
+custom_system_message: ''
+instruction_template_str: |-
+  {%- set ns = namespace(found=false) -%}
+  {%- for message in messages -%}
+      {%- if message['role'] == 'system' -%}
+          {%- set ns.found = true -%}
+      {%- endif -%}
+  {%- endfor -%}
+  {%- if not ns.found -%}
+      {{- '' + 'Below is an instruction that describes a task. Write a response that appropriately completes the request.' + '\n\n' -}}
+  {%- endif %}
+  {%- for message in messages %}
+      {%- if message['role'] == 'system' -%}
+          {{- '' + message['content'] + '\n\n' -}}
+      {%- else -%}
+          {%- if message['role'] == 'user' -%}
+              {{-'### Instruction:\n' + message['content'] + '\n\n'-}}
+          {%- else -%}
+              {{-'### Response:\n' + message['content'] + '\n\n' -}}
+          {%- endif -%}
+      {%- endif -%}
+  {%- endfor -%}
+  {%- if add_generation_prompt -%}
+      {{-'### Response:\n'-}}
+  {%- endif -%}
+chat_template_str: |-
+  {%- for message in messages %}
+      {%- if message['role'] == 'system' -%}
+          {{- message['content'] + '\n\n' -}}
+      {%- else -%}
+          {%- if message['role'] == 'user' -%}
+              {{- name1 + ': ' + message['content'] + '\n'-}}
+          {%- else -%}
+              {{- name2 + ': ' + message['content'] + '\n' -}}
+          {%- endif -%}
+      {%- endif -%}
+  {%- endfor -%}
+chat-instruct_command: |-
+  Continue the chat dialogue below. Write a single reply for the character "<|character|>".
+
+  <|prompt|>
+autoload_model: false
+gallery-items_per_page: 50
+gallery-open: false
+default_extensions:
+- gallery
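The instruction_template_str above is a Jinja2 chat template: it receives a messages list and an add_generation_prompt flag and renders an Alpaca-style prompt. A minimal rendering sketch with an abridged template (the full version also injects a default system message when none is present):

from jinja2 import Template

# Abridged user/assistant portion of instruction_template_str
template_str = (
    "{%- for message in messages %}"
    "{%- if message['role'] == 'user' -%}"
    "{{- '### Instruction:\\n' + message['content'] + '\\n\\n' -}}"
    "{%- else -%}"
    "{{- '### Response:\\n' + message['content'] + '\\n\\n' -}}"
    "{%- endif -%}"
    "{%- endfor -%}"
    "{%- if add_generation_prompt -%}{{- '### Response:\\n' -}}{%- endif -%}"
)

prompt = Template(template_str).render(
    messages=[{'role': 'user', 'content': 'Hello!'}],
    add_generation_prompt=True,
)
print(prompt)  # '### Instruction:\nHello!\n\n### Response:\n'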
settings.yaml
ADDED
@@ -0,0 +1,8 @@
+prompt-default: None
+prompt-notebook: None
+preset: Midnight Enigma
+truncation_length: 4096
+character: Devia
+default_extensions:
+- openai
+- gallery
setup.cfg
ADDED
@@ -0,0 +1,3 @@
+[pycodestyle]
+max-line-length = 120
+ignore = E402, E501, E722
start_linux.sh
ADDED
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+cd "$(dirname "${BASH_SOURCE[0]}")"
+
+if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which cannot be silently installed under a path with spaces. && exit; fi
+
+# deactivate existing conda envs as needed to avoid conflicts
+{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null
+
+OS_ARCH=$(uname -m)
+case "${OS_ARCH}" in
+    x86_64*)  OS_ARCH="x86_64";;
+    arm64*)   OS_ARCH="aarch64";;
+    aarch64*) OS_ARCH="aarch64";;
+    *)        echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
+esac
+
+# config
+INSTALL_DIR="$(pwd)/installer_files"
+CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda"
+INSTALL_ENV_DIR="$(pwd)/installer_files/env"
+MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-Linux-${OS_ARCH}.sh"
+conda_exists="F"
+
+# figure out whether git and conda need to be installed
+if "$CONDA_ROOT_PREFIX/bin/conda" --version &>/dev/null; then conda_exists="T"; fi
+
+# (if necessary) install git and conda into a contained environment
+# download miniconda
+if [ "$conda_exists" == "F" ]; then
+    echo "Downloading Miniconda from $MINICONDA_DOWNLOAD_URL to $INSTALL_DIR/miniconda_installer.sh"
+
+    mkdir -p "$INSTALL_DIR"
+    curl -Lk "$MINICONDA_DOWNLOAD_URL" > "$INSTALL_DIR/miniconda_installer.sh"
+
+    chmod u+x "$INSTALL_DIR/miniconda_installer.sh"
+    bash "$INSTALL_DIR/miniconda_installer.sh" -b -p $CONDA_ROOT_PREFIX
+
+    # test the conda binary
+    echo "Miniconda version:"
+    "$CONDA_ROOT_PREFIX/bin/conda" --version
+fi
+
+# create the installer env
+if [ ! -e "$INSTALL_ENV_DIR" ]; then
+    "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.11
+fi
+
+# check if conda environment was actually created
+if [ ! -e "$INSTALL_ENV_DIR/bin/python" ]; then
+    echo "Conda environment is empty."
+    exit
+fi
+
+# environment isolation
+export PYTHONNOUSERSITE=1
+unset PYTHONPATH
+unset PYTHONHOME
+export CUDA_PATH="$INSTALL_ENV_DIR"
+export CUDA_HOME="$CUDA_PATH"
+
+# activate installer env
+source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
+conda activate "$INSTALL_ENV_DIR"
+
+# setup installer env
+python one_click.py $@
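Because the last line forwards $@ to one_click.py, any extra arguments given to the launcher reach the web UI unchanged. Note that the unquoted $@ means arguments containing spaces would be word-split; "$@" would be the robust form. A typical invocation, using flags this project already relies on elsewhere:

    chmod +x start_linux.sh
    ./start_linux.sh --listen --api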
start_macos.sh
ADDED
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+cd "$(dirname "${BASH_SOURCE[0]}")"
+
+if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which cannot be silently installed under a path with spaces. && exit; fi
+
+# deactivate existing conda envs as needed to avoid conflicts
+{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null
+
+# M Series or Intel
+OS_ARCH=$(uname -m)
+case "${OS_ARCH}" in
+    x86_64*) OS_ARCH="x86_64";;
+    arm64*)  OS_ARCH="arm64";;
+    *)       echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
+esac
+
+# config
+INSTALL_DIR="$(pwd)/installer_files"
+CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda"
+INSTALL_ENV_DIR="$(pwd)/installer_files/env"
+MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-MacOSX-${OS_ARCH}.sh"
+conda_exists="F"
+
+# figure out whether git and conda need to be installed
+if "$CONDA_ROOT_PREFIX/bin/conda" --version &>/dev/null; then conda_exists="T"; fi
+
+# (if necessary) install git and conda into a contained environment
+# download miniconda
+if [ "$conda_exists" == "F" ]; then
+    echo "Downloading Miniconda from $MINICONDA_DOWNLOAD_URL to $INSTALL_DIR/miniconda_installer.sh"
+
+    mkdir -p "$INSTALL_DIR"
+    curl -Lk "$MINICONDA_DOWNLOAD_URL" > "$INSTALL_DIR/miniconda_installer.sh"
+
+    chmod u+x "$INSTALL_DIR/miniconda_installer.sh"
+    bash "$INSTALL_DIR/miniconda_installer.sh" -b -p $CONDA_ROOT_PREFIX
+
+    # test the conda binary
+    echo "Miniconda version:"
+    "$CONDA_ROOT_PREFIX/bin/conda" --version
+fi
+
+# create the installer env
+if [ ! -e "$INSTALL_ENV_DIR" ]; then
+    "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.11
+fi
+
+# check if conda environment was actually created
+if [ ! -e "$INSTALL_ENV_DIR/bin/python" ]; then
+    echo "Conda environment is empty."
+    exit
+fi
+
+# environment isolation
+export PYTHONNOUSERSITE=1
+unset PYTHONPATH
+unset PYTHONHOME
+export CUDA_PATH="$INSTALL_ENV_DIR"
+export CUDA_HOME="$CUDA_PATH"
+
+# activate installer env
+source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
+conda activate "$INSTALL_ENV_DIR"
+
+# setup installer env
+python one_click.py $@
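The only differences from the Linux launcher are the Miniconda download URL (a MacOSX build) and the architecture mapping: macOS keeps "arm64" rather than translating it to "aarch64". A quick way to check which installer the script would fetch on a given Mac:

    OS_ARCH=$(uname -m)    # "arm64" on Apple Silicon, "x86_64" on Intel
    echo "Miniconda3-py310_23.3.1-0-MacOSX-${OS_ARCH}.sh"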
start_windows.bat
ADDED
@@ -0,0 +1,84 @@
+@echo off
+
+cd /D "%~dp0"
+
+set PATH=%PATH%;%SystemRoot%\system32
+
+echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which cannot be silently installed under a path with spaces. && goto end
+
+@rem Check for special characters in installation path
+set "SPCHARMESSAGE="WARNING: Special characters were detected in the installation path!" "This can cause the installation to fail!""
+echo "%CD%"| findstr /R /C:"[!#\$%&()\*+,;<=>?@\[\]\^`{|}~]" >nul && (
+	call :PrintBigMessage %SPCHARMESSAGE%
+)
+set SPCHARMESSAGE=
+
+@rem fix failed install when installing to a separate drive
+set TMP=%cd%\installer_files
+set TEMP=%cd%\installer_files
+
+@rem deactivate existing conda envs as needed to avoid conflicts
+(call conda deactivate && call conda deactivate && call conda deactivate) 2>nul
+
+@rem config
+set INSTALL_DIR=%cd%\installer_files
+set CONDA_ROOT_PREFIX=%cd%\installer_files\conda
+set INSTALL_ENV_DIR=%cd%\installer_files\env
+set MINICONDA_DOWNLOAD_URL=https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-Windows-x86_64.exe
+set conda_exists=F
+
+@rem figure out whether git and conda need to be installed
+call "%CONDA_ROOT_PREFIX%\_conda.exe" --version >nul 2>&1
+if "%ERRORLEVEL%" EQU "0" set conda_exists=T
+
+@rem (if necessary) install git and conda into a contained environment
+@rem download conda
+if "%conda_exists%" == "F" (
+	echo Downloading Miniconda from %MINICONDA_DOWNLOAD_URL% to %INSTALL_DIR%\miniconda_installer.exe
+
+	mkdir "%INSTALL_DIR%"
+	call curl -Lk "%MINICONDA_DOWNLOAD_URL%" > "%INSTALL_DIR%\miniconda_installer.exe" || ( echo. && echo Miniconda failed to download. && goto end )
+
+	echo Installing Miniconda to %CONDA_ROOT_PREFIX%
+	start /wait "" "%INSTALL_DIR%\miniconda_installer.exe" /InstallationType=JustMe /NoShortcuts=1 /AddToPath=0 /RegisterPython=0 /NoRegistry=1 /S /D=%CONDA_ROOT_PREFIX%
+
+	@rem test the conda binary
+	echo Miniconda version:
+	call "%CONDA_ROOT_PREFIX%\_conda.exe" --version || ( echo. && echo Miniconda not found. && goto end )
+)
+
+@rem create the installer env
+if not exist "%INSTALL_ENV_DIR%" (
+	echo Packages to install: %PACKAGES_TO_INSTALL%
+	call "%CONDA_ROOT_PREFIX%\_conda.exe" create --no-shortcuts -y -k --prefix "%INSTALL_ENV_DIR%" python=3.11 || ( echo. && echo Conda environment creation failed. && goto end )
+)
+
+@rem check if conda environment was actually created
+if not exist "%INSTALL_ENV_DIR%\python.exe" ( echo. && echo Conda environment is empty. && goto end )
+
+@rem environment isolation
+set PYTHONNOUSERSITE=1
+set PYTHONPATH=
+set PYTHONHOME=
+set "CUDA_PATH=%INSTALL_ENV_DIR%"
+set "CUDA_HOME=%CUDA_PATH%"
+
+@rem activate installer env
+call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end )
+
+@rem setup installer env
+call python one_click.py %*
+
+@rem below are functions for the script; the next line skips them during normal execution
+goto end
+
+:PrintBigMessage
+echo. && echo.
+echo *******************************************************************
+for %%M in (%*) do echo * %%~M
+echo *******************************************************************
+echo. && echo.
+exit /b
+
+:end
+pause
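The findstr guard exists because silent installers such as Miniconda's can fail when the target path contains shell-special characters. For readers more comfortable with bash, a rough POSIX-shell rendering of the same check (illustration only, not part of the repository):

    printf '%s' "$PWD" | grep -q '[]!#$%&()*+,;<=>?@[\^`{|}~]' \
      && echo "WARNING: Special characters were detected in the installation path!"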
start_wsl.bat
ADDED
@@ -0,0 +1,11 @@
+@echo off
+
+cd /D "%~dp0"
+
+set PATH=%PATH%;%SystemRoot%\system32
+
+@rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script
+call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh %*"
+
+:end
+pause
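The sed call strips carriage returns so that wsl.sh, which may have been checked out with Windows line endings, still parses under bash inside WSL. From a WSL shell you can verify whether the conversion is needed:

    grep -c $'\r$' wsl.sh    # a non-zero count means CRLF line endings are still present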
update_linux.sh
ADDED
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+cd "$(dirname "${BASH_SOURCE[0]}")"
+
+if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which cannot be silently installed under a path with spaces. && exit; fi
+
+# deactivate existing conda envs as needed to avoid conflicts
+{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null
+
+# config
+CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda"
+INSTALL_ENV_DIR="$(pwd)/installer_files/env"
+
+# environment isolation
+export PYTHONNOUSERSITE=1
+unset PYTHONPATH
+unset PYTHONHOME
+export CUDA_PATH="$INSTALL_ENV_DIR"
+export CUDA_HOME="$CUDA_PATH"
+
+# activate installer env
+source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
+conda activate "$INSTALL_ENV_DIR"
+
+# update installer env
+python one_click.py --update && echo -e "\nDone!"
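The updater reuses the conda env provisioned by the matching start script and simply delegates to one_click.py --update; the macOS variant below is byte-for-byte identical. A typical flow:

    ./update_linux.sh    # pull the latest code and refresh requirements
    ./start_linux.sh     # relaunch the web UI afterwards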
update_macos.sh
ADDED
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+cd "$(dirname "${BASH_SOURCE[0]}")"
+
+if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which cannot be silently installed under a path with spaces. && exit; fi
+
+# deactivate existing conda envs as needed to avoid conflicts
+{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null
+
+# config
+CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda"
+INSTALL_ENV_DIR="$(pwd)/installer_files/env"
+
+# environment isolation
+export PYTHONNOUSERSITE=1
+unset PYTHONPATH
+unset PYTHONHOME
+export CUDA_PATH="$INSTALL_ENV_DIR"
+export CUDA_HOME="$CUDA_PATH"
+
+# activate installer env
+source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
+conda activate "$INSTALL_ENV_DIR"
+
+# update installer env
+python one_click.py --update && echo -e "\nDone!"
update_windows.bat
ADDED
@@ -0,0 +1,37 @@
+@echo off
+
+cd /D "%~dp0"
+
+set PATH=%PATH%;%SystemRoot%\system32
+
+echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which cannot be silently installed under a path with spaces. && goto end
+
+@rem fix failed install when installing to a separate drive
+set TMP=%cd%\installer_files
+set TEMP=%cd%\installer_files
+
+@rem deactivate existing conda envs as needed to avoid conflicts
+(call conda deactivate && call conda deactivate && call conda deactivate) 2>nul
+
+@rem config
+set CONDA_ROOT_PREFIX=%cd%\installer_files\conda
+set INSTALL_ENV_DIR=%cd%\installer_files\env
+
+@rem environment isolation
+set PYTHONNOUSERSITE=1
+set PYTHONPATH=
+set PYTHONHOME=
+set "CUDA_PATH=%INSTALL_ENV_DIR%"
+set "CUDA_HOME=%CUDA_PATH%"
+
+@rem activate installer env
+call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end )
+
+@rem update installer env
+call python one_click.py --update && (
+	echo.
+	echo Done!
+)
+
+:end
+pause
update_wsl.bat
ADDED
@@ -0,0 +1,11 @@
+@echo off
+
+cd /D "%~dp0"
+
+set PATH=%PATH%;%SystemRoot%\system32
+
+@rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script; calling wsl.sh with 'update' runs the updater
+call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh update"
+
+:end
+pause
wsl.sh
ADDED
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+# detect if build-essential is missing or broken
+if ! dpkg-query -W -f'${Status}' "build-essential" 2>/dev/null | grep -q "ok installed"; then
+    echo "build-essential not found or broken!
+
+A C++ compiler is required to build needed Python packages!
+To install one, run cmd_wsl.bat and enter these commands:
+
+sudo apt-get update
+sudo apt-get install build-essential
+"
+    read -n1 -p "Continue the installer anyway? [y,n]" EXIT_PROMPT
+    # only continue if user inputs 'y', else exit
+    if ! [[ $EXIT_PROMPT == "Y" || $EXIT_PROMPT == "y" ]]; then exit; fi
+fi
+
+# deactivate existing conda envs as needed to avoid conflicts
+{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null
+
+# config; unlike the other scripts, this one can't use the current directory (file IO bug in WSL) and needs to live on the virtual drive
+INSTALL_DIR_PREFIX="$HOME/text-gen-install"
+if [[ ! $(realpath "$(pwd)/..") = /mnt/* ]]; then
+    INSTALL_DIR_PREFIX="$(realpath "$(pwd)/..")" && INSTALL_INPLACE=1
+fi
+INSTALL_DIR="$INSTALL_DIR_PREFIX/text-generation-webui"
+CONDA_ROOT_PREFIX="$INSTALL_DIR/installer_files/conda"
+INSTALL_ENV_DIR="$INSTALL_DIR/installer_files/env"
+MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-Linux-x86_64.sh"
+conda_exists="F"
+
+# environment isolation
+export PYTHONNOUSERSITE=1
+unset PYTHONPATH
+unset PYTHONHOME
+export CUDA_PATH="$INSTALL_ENV_DIR"
+export CUDA_HOME="$CUDA_PATH"
+
+# /usr/lib/wsl/lib needs to be added to LD_LIBRARY_PATH to fix a years-old bug in WSL where GPU drivers aren't linked properly
+export LD_LIBRARY_PATH="$CUDA_HOME/lib:/usr/lib/wsl/lib:$LD_LIBRARY_PATH"
+
+# open a bash CLI if called with 'wsl.sh cmd', with workarounds for an existing conda install
+if [ "$1" == "cmd" ]; then
+    exec bash --init-file <(echo ". ~/.bashrc; conda deactivate 2> /dev/null; cd $INSTALL_DIR || cd $HOME; source $CONDA_ROOT_PREFIX/etc/profile.d/conda.sh; conda activate $INSTALL_ENV_DIR")
+    exit
+fi
+
+if [[ "$INSTALL_DIR" =~ " " ]]; then echo This script relies on Miniconda which cannot be silently installed under a path with spaces. && exit; fi
+
+# create install dir if missing
+if [ ! -d "$INSTALL_DIR" ]; then mkdir -p "$INSTALL_DIR" || exit; fi
+
+# figure out whether git and conda need to be installed
+if "$CONDA_ROOT_PREFIX/bin/conda" --version &>/dev/null; then conda_exists="T"; fi
+
+# (if necessary) install git and conda into a contained environment
+# download miniconda
+if [ "$conda_exists" == "F" ]; then
+    echo "Downloading Miniconda from $MINICONDA_DOWNLOAD_URL to $INSTALL_DIR/miniconda_installer.sh"
+
+    curl -Lk "$MINICONDA_DOWNLOAD_URL" > "$INSTALL_DIR/miniconda_installer.sh"
+
+    chmod u+x "$INSTALL_DIR/miniconda_installer.sh"
+    bash "$INSTALL_DIR/miniconda_installer.sh" -b -p $CONDA_ROOT_PREFIX
+
+    # test the conda binary
+    echo "Miniconda version:"
+    "$CONDA_ROOT_PREFIX/bin/conda" --version
+fi
+
+# create the installer env
+if [ ! -e "$INSTALL_ENV_DIR" ]; then
+    "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.11 git
+fi
+
+# check if conda environment was actually created
+if [ ! -e "$INSTALL_ENV_DIR/bin/python" ]; then
+    echo "Conda environment is empty."
+    exit
+fi
+
+# activate installer env
+source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
+conda activate "$INSTALL_ENV_DIR"
+
+pushd $INSTALL_DIR 1> /dev/null || exit
+
+if [ ! -f "./server.py" ]; then
+    git init -b main
+    git remote add origin https://github.com/oobabooga/text-generation-webui
+    git fetch
+    git remote set-head origin -a
+    git reset origin/HEAD --hard
+    git branch --set-upstream-to=origin/HEAD
+    git restore -- . :!./CMD_FLAGS.txt
+fi
+
+# copy CMD_FLAGS.txt to the install dir to allow edits within Windows
+if [[ $INSTALL_INPLACE != 1 ]]; then
+    # workaround for old install migration
+    if [ ! -f "./wsl.sh" ]; then
+        git pull || exit
+        [ -f "../webui.py" ] && mv "../webui.py" "../webui-old.py"
+    fi
+    if [ -f "$(dirs +1)/CMD_FLAGS.txt" ] && [ -f "./CMD_FLAGS.txt" ]; then cp -u "$(dirs +1)/CMD_FLAGS.txt" "$INSTALL_DIR"; fi
+fi
+
+# set up the installer env; update it instead if called with 'wsl.sh update'
+case "$1" in
+("update") python one_click.py --update;;
+(*) python one_click.py $@;;
+esac
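In effect, wsl.sh plays three roles depending on its first argument, and the *_wsl.bat wrappers from Windows invoke it accordingly. From a WSL shell:

    ./wsl.sh             # install (under ~/text-gen-install when run from /mnt/*) and launch
    ./wsl.sh update      # run one_click.py --update in the installer env
    ./wsl.sh cmd         # open an interactive bash shell with the env activated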