Skip to content

Commit e9b9c51

Browse files
committed
Initial commit pre-dev-release testing.
2 parents 653f434 + 5522584 commit e9b9c51

32 files changed

+850
-451
lines changed

.github/dependabot.yml

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -7,5 +7,6 @@ version: 2
77
updates:
88
- package-ecosystem: "pip" # See documentation for possible values
99
directory: "/" # Location of package manifests
10+
target-branch: "dev"
1011
schedule:
1112
interval: "weekly"

.github/workflows/stale.yml

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -13,8 +13,8 @@ jobs:
1313
- uses: actions/stale@v5
1414
with:
1515
stale-issue-message: ""
16-
close-issue-message: "This issue has been closed due to inactivity for 2 months. If you believe it is still relevant, please leave a comment below. You can tag a developer in your comment."
17-
days-before-issue-stale: 60
16+
close-issue-message: "This issue has been closed due to inactivity for 6 months. If you believe it is still relevant, please leave a comment below. You can tag a developer in your comment."
17+
days-before-issue-stale: 180
1818
days-before-issue-close: 0
1919
stale-issue-label: "stale"
2020
days-before-pr-stale: -1

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -129,3 +129,4 @@ start-webui.sh
129129

130130
files-to-sync.txt
131131
rsync-exclude.lst
132+
.editorconfig

Colab-TextGen-GPU.ipynb

Lines changed: 16 additions & 33 deletions
Original file line number | Diff line number | Diff line change
@@ -22,7 +22,7 @@
2222
"source": [
2323
"# oobabooga/text-generation-webui\n",
2424
"\n",
25-
"After running both cells, a public gradio URL will appear at the bottom in a few minutes. You can optionally generate an API link.\n",
25+
"After running both cells, a public gradio URL will appear at the bottom in around 10 minutes. You can optionally generate an API link.\n",
2626
"\n",
2727
"* Project page: https://github.com/oobabooga/text-generation-webui\n",
2828
"* Gradio server status: https://status.gradio.app/"
@@ -53,44 +53,28 @@
5353
"\n",
5454
"#@markdown If unsure about the branch, write \"main\" or leave it blank.\n",
5555
"\n",
56-
"import torch\n",
56+
"import os\n",
5757
"from pathlib import Path\n",
5858
"\n",
59+
"os.environ.pop('PYTHONPATH', None)\n",
60+
"\n",
5961
"if Path.cwd().name != 'text-generation-webui':\n",
60-
" print(\"Installing the webui...\")\n",
62+
" print(\"\\033[1;32;1m\\n --> Installing the web UI. This will take a while, but after the initial setup, you can download and test as many models as you like.\\033[0;37;0m\\n\")\n",
6163
"\n",
6264
" !git clone https://github.com/oobabooga/text-generation-webui\n",
6365
" %cd text-generation-webui\n",
6466
"\n",
65-
" torver = torch.__version__\n",
66-
" print(f\"TORCH: {torver}\")\n",
67-
" is_cuda118 = '+cu118' in torver # 2.1.0+cu118\n",
68-
"\n",
69-
" if is_cuda118:\n",
70-
" !python -m pip install --upgrade torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu118\n",
71-
" else:\n",
72-
" !python -m pip install --upgrade torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu121\n",
73-
"\n",
74-
" textgen_requirements = open('requirements.txt').read().splitlines()\n",
75-
" if is_cuda118:\n",
76-
" textgen_requirements = [req.replace('+cu121', '+cu118').replace('+cu122', '+cu118') for req in textgen_requirements]\n",
77-
" with open('temp_requirements.txt', 'w') as file:\n",
78-
" file.write('\\n'.join(textgen_requirements))\n",
79-
"\n",
80-
" !pip install -r temp_requirements.txt --upgrade\n",
81-
"\n",
82-
" print(\"\\033[1;32;1m\\n --> If you see a warning about \\\"previously imported packages\\\", just ignore it.\\033[0;37;0m\")\n",
83-
" print(\"\\033[1;32;1m\\n --> There is no need to restart the runtime.\\n\\033[0;37;0m\")\n",
84-
"\n",
85-
" try:\n",
86-
" import flash_attn\n",
87-
" except:\n",
88-
" !pip uninstall -y flash_attn\n",
67+
" # Install the project in an isolated environment\n",
68+
" !GPU_CHOICE=A \\\n",
69+
" USE_CUDA118=FALSE \\\n",
70+
" LAUNCH_AFTER_INSTALL=FALSE \\\n",
71+
" INSTALL_EXTENSIONS=FALSE \\\n",
72+
" ./start_linux.sh\n",
8973
"\n",
9074
"# Parameters\n",
91-
"model_url = \"https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ\" #@param {type:\"string\"}\n",
92-
"branch = \"gptq-4bit-32g-actorder_True\" #@param {type:\"string\"}\n",
93-
"command_line_flags = \"--n-gpu-layers 128 --load-in-4bit --use_double_quant\" #@param {type:\"string\"}\n",
75+
"model_url = \"https://huggingface.co/turboderp/gemma-2-9b-it-exl2\" #@param {type:\"string\"}\n",
76+
"branch = \"8.0bpw\" #@param {type:\"string\"}\n",
77+
"command_line_flags = \"--n-gpu-layers 128 --load-in-4bit --use_double_quant --no_flash_attn\" #@param {type:\"string\"}\n",
9478
"api = False #@param {type:\"boolean\"}\n",
9579
"\n",
9680
"if api:\n",
@@ -116,11 +100,10 @@
116100
" output_folder = \"\"\n",
117101
"\n",
118102
"# Start the web UI\n",
119-
"cmd = f\"python server.py --share\"\n",
103+
"cmd = f\"./start_linux.sh {command_line_flags} --share\"\n",
120104
"if output_folder != \"\":\n",
121105
" cmd += f\" --model {output_folder}\"\n",
122-
"cmd += f\" {command_line_flags}\"\n",
123-
"print(cmd)\n",
106+
"\n",
124107
"!$cmd"
125108
],
126109
"metadata": {

0 commit comments

Comments (0)