Skip to content

Commit c84f846

Browse files
committed
bugfix(pyproject) Convert from dependency groups to extras and update docs to use UV's built-in torch support
1 parent 4b5c481 commit c84f846

File tree

7 files changed

+833
-333
lines changed

7 files changed

+833
-333
lines changed

docker/.env.sample

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,10 @@
2222
## GPU_DRIVER can be set to either `cuda` or `rocm` to enable GPU support in the container accordingly.
2323
# GPU_DRIVER=cuda #| rocm
2424

25+
## If you are using ROCM, you will need to ensure that the render group within the container and the host system use the same group ID.
26+
## To obtain the group ID of the render group on the host system, run `getent group render` and grab the number.
27+
# RENDER_GROUP_ID=
28+
2529
## CONTAINER_UID can be set to the UID of the user on the host system that should own the files in the container.
2630
## It is usually not necessary to change this. Use `id -u` on the host system to find the UID.
2731
# CONTAINER_UID=1000

docker/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
7474
# this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
7575
--mount=type=bind,source=invokeai/version,target=invokeai/version \
7676
ulimit -n 30000 && \
77-
uv sync --group $GPU_DRIVER --frozen
77+
uv sync --extra $GPU_DRIVER --frozen
7878

7979
# Link amdgpu.ids for ROCm builds
8080
# contributed by https://github.com/Rubonnek

docker/docker-compose.yml

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -48,11 +48,8 @@ services:
4848
invokeai-rocm:
4949
<<: *invokeai
5050
environment:
51-
# if set, CONTAINER_INVOKEAI_ROOT will override the Invoke runtime directory location *inside* the container
52-
- INVOKEAI_ROOT=${CONTAINER_INVOKEAI_ROOT:-/invokeai}
53-
- HF_HOME
5451
- AMD_VISIBLE_DEVICES=all
55-
- RENDER_GROUP_ID=993
52+
- RENDER_GROUP_ID=${RENDER_GROUP_ID}
5653
runtime: amd
5754
profiles:
5855
- rocm

docker/docker-entrypoint.sh

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@ set -e -o pipefail
1414
# docker run --rm -it -v /some/path:/invokeai -e CONTAINER_UID=$(id -u) <this image>
1515
# Default UID: 1000 chosen due to popularity on Linux systems. Possibly 501 on MacOS.
1616

17+
printenv
18+
1719
USER_ID=${CONTAINER_UID:-1000}
1820
USER=ubuntu
1921
# if the user does not exist, create it. It is expected to be present on ubuntu >=24.x
@@ -24,9 +26,13 @@ usermod -u ${USER_ID} ${USER} 1>/dev/null
2426
## ROCM specific configuration
2527
# render group within the container must match the host render group
2628
# otherwise the container will not be able to access the host GPU.
27-
groupmod -g ${RENDER_GROUP_ID:-993} render
28-
usermod -a -G render ${USER}
29-
usermod -a -G video ${USER}
29+
if [[ -v "RENDER_GROUP_ID" ]] && [[ ! -z "${RENDER_GROUP_ID}" ]]; then
30+
# ensure the render group exists
31+
groupmod -g ${RENDER_GROUP_ID} render
32+
usermod -a -G render ${USER}
33+
usermod -a -G video ${USER}
34+
fi
35+
3036

3137
### Set the $PUBLIC_KEY env var to enable SSH access.
3238
# We do not install openssh-server in the image by default to avoid bloat.

docs/installation/manual.md

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -69,34 +69,34 @@ The following commands vary depending on the version of Invoke being installed a
6969
- If you have an Nvidia 20xx series GPU or older, use `invokeai[xformers]`.
7070
- If you have an Nvidia 30xx series GPU or newer, or do not have an Nvidia GPU, use `invokeai`.
7171
72-
7. Determine the `PyPI` index URL to use for installation, if any. This is necessary to get the right version of torch installed.
72+
7. Determine the torch backend to use for installation, if any. This is necessary to get the right version of torch installed. This is achieved by using [UV's built-in torch support](https://docs.astral.sh/uv/guides/integration/pytorch/#automatic-backend-selection).
7373

7474
=== "Invoke v5.12 and later"
7575

76-
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu128`.
77-
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
78-
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
79-
- **In all other cases, do not use an index.**
76+
- If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu128`.
77+
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
78+
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.3`.
79+
- **In all other cases, do not use a torch backend.**
8080

8181
=== "Invoke v5.10.0 to v5.11.0"
8282

83-
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu126`.
84-
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
85-
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
83+
- If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu126`.
84+
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
85+
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.2.4`.
8686
- **In all other cases, do not use an index.**
8787

8888
=== "Invoke v5.0.0 to v5.9.1"
8989

90-
- If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
91-
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
92-
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.1`.
90+
- If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
91+
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
92+
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.1`.
9393
- **In all other cases, do not use an index.**
9494

9595
=== "Invoke v4"
9696

97-
- If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
98-
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
99-
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm5.2`.
97+
- If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
98+
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
99+
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm5.2`.
100100
- **In all other cases, do not use an index.**
101101

102102
8. Install the `invokeai` package. Substitute the package specifier and version.
@@ -105,10 +105,10 @@ The following commands vary depending on the version of Invoke being installed a
105105
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --force-reinstall
106106
```
107107

108-
If you determined you needed to use a `PyPI` index URL in the previous step, you'll need to add `--index=<INDEX_URL>` like this:
108+
If you determined you needed to use a torch backend in the previous step, you'll need to set the backend like this:
109109
110110
```sh
111-
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --index=<INDEX_URL> --force-reinstall
111+
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --torch-backend=<TORCH_BACKEND> --force-reinstall
112112
```
113113
114114
9. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:

pyproject.toml

Lines changed: 27 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -76,28 +76,20 @@ dependencies = [
7676
"semver~=3.0.1",
7777
]
7878

79-
[dependency-groups]
80-
cpu = ["torch==2.7.1+cpu", "torchvision==0.22.1+cpu"]
81-
cuda = ["torch==2.7.1+cu128", "torchvision==0.22.1+cu128"]
82-
rocm = ["torch==2.7.1+rocm6.3", "torchvision==0.22.1+rocm6.3"]
83-
8479
[project.optional-dependencies]
8580
"xformers" = [
8681
# Core generation dependencies, pinned for reproducible builds.
8782
"xformers>=0.0.28.post1; sys_platform!='darwin'",
8883
# torch 2.4+cu carries its own triton dependency
8984
]
9085

91-
# These enable the usage of installing the package with specific support.
92-
# uv pip install .[rocm] --python 3.12 --python-preference only-managed --force-reinstall --index-strategy unsafe-best-match
93-
# Problem is that these break `uv lock --index-strategy unsafe-best-match`
94-
# This does work though, as the pyproject.toml has the indexes defined.
95-
# uv pip install . torch==2.7.1+rocm6.3 --force-reinstall --index-strategy unsafe-best-match
96-
# Maybe we update the docs to show these instead of the --index way?
97-
98-
# cpu = ["torch==2.7.1+cpu"]
99-
# cuda = ["torch==2.7.1+cu128"]
100-
# rocm = ["torch==2.7.1+rocm6.3"]
86+
"cpu" = ["torch==2.7.1+cpu", "torchvision==0.22.1+cpu"]
87+
"cuda" = ["torch==2.7.1+cu128", "torchvision==0.22.1+cu128"]
88+
"rocm" = [
89+
"torch==2.7.1+rocm6.3",
90+
"torchvision==0.22.1+rocm6.3",
91+
"pytorch-triton-rocm",
92+
]
10193

10294
"onnx" = ["onnxruntime"]
10395
"onnx-cuda" = ["onnxruntime-gpu"]
@@ -129,25 +121,38 @@ rocm = ["torch==2.7.1+rocm6.3", "torchvision==0.22.1+rocm6.3"]
129121
# Prevent opencv-python from ever being chosen during dependency resolution.
130122
# This prevents conflicts with opencv-contrib-python, which Invoke requires.
131123
override-dependencies = ["opencv-python; sys_platform=='never'"]
132-
conflicts = [[{ group = "cpu" }, { group = "cuda" }, { group = "rocm" }]]
124+
conflicts = [[{ extra = "cpu" }, { extra = "cuda" }, { extra = "rocm" }]]
125+
index-strategy = "unsafe-best-match"
126+
127+
[tool.uv.sources]
128+
torch = [
129+
{ index = "torch-cpu", extra = "cpu" },
130+
{ index = "torch-cuda", extra = "cuda" },
131+
{ index = "torch-rocm", extra = "rocm" },
132+
]
133+
torchvision = [
134+
{ index = "torch-cpu", extra = "cpu" },
135+
{ index = "torch-cuda", extra = "cuda" },
136+
{ index = "torch-rocm", extra = "rocm" },
137+
]
138+
pytorch-triton-rocm = [
139+
{ index = "torch-rocm", extra = "rocm", marker = "sys_platform == 'linux'" },
140+
]
133141

134-
# This will cause: `uv lock --index-strategy unsafe-best-match` to be needed for future locks
135-
# If you are updating these, make sure to update the docker/Dockerfile as well.
136142
[[tool.uv.index]]
137143
name = "torch-cpu"
138144
url = "https://download.pytorch.org/whl/cpu"
139-
group = "cpu"
145+
explicit = true
140146

141147
[[tool.uv.index]]
142148
name = "torch-cuda"
143149
url = "https://download.pytorch.org/whl/cu128"
144-
group = "cuda"
150+
explicit = true
145151

146152
[[tool.uv.index]]
147153
name = "torch-rocm"
148154
url = "https://download.pytorch.org/whl/rocm6.3"
149-
group = "rocm"
150-
155+
explicit = true
151156

152157
[project.scripts]
153158
"invokeai-web" = "invokeai.app.run_app:run_app"

0 commit comments

Comments
 (0)