From dd3954690e69ddd2c274378593fcd94c540c9c3e Mon Sep 17 00:00:00 2001 From: Jucelio Quentino Date: Fri, 7 Nov 2025 10:10:39 -0300 Subject: [PATCH 1/3] feat: add pyproject.toml to enable local execution using UV package manager --- README.md | 44 ++++++++++++++++++++++++++++++++++++++++++++ pyproject.toml | 21 +++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 pyproject.toml diff --git a/README.md b/README.md index 10afeca6..78ec280b 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,50 @@ On this Github repo, navigate to the lab folder you want to run (`lab1`, `lab2`, ## Running the labs Now, to run the labs, open the Jupyter notebook on Colab. Navigate to the "Runtime" tab --> "Change runtime type". In the pop-up window, under "Runtime type" select "Python 3", and under "Hardware accelerator" select "GPU". Go through the notebooks and fill in the `#TODO` cells to get the code to compile for yourself! +## Running the labs locally with uv + +If you prefer to run the notebooks locally you can use the project's `uv` environment tool (used in this repo). The basic workflow is: + +1. Open PowerShell and change to the repository root directory: + +```powershell +cd \path\to\introtodeeplearning +``` + +2. (Optional) Verify your Python version matches the project's `requires-python` (see `pyproject.toml`): + +```powershell +python --version +``` + +3. Install the project dependencies into the `uv` environment. The dependencies and any custom package indexes are declared in `pyproject.toml`. To install everything at once run: + +```powershell +uv sync +``` + +This will install all dependencies listed in `pyproject.toml` and automatically use any indexes defined there (for example, the PyTorch CUDA index declared in this repo). + +Notes: +- If `cu126` is incompatible with your machine, simply edit `pyproject.toml` and change the `url` value to the desired CUDA tag.
For example, to use CUDA 12.1 change the URL to: + +```toml +[[tool.uv.index]] +name = "pytorch" +url = "https://download.pytorch.org/whl/cu121" +``` + +You can use the PyTorch get-started selector (https://pytorch.org/get-started/locally/) to find the appropriate index URL. + +This keeps the index configuration in a single place and makes it easy to switch CUDA versions for all contributors. + +4. Start Jupyter Notebook or JupyterLab from the `uv` environment: + +```powershell +uv run jupyter notebook +# or +uv run jupyter lab +``` ### MIT Deep Learning package You might notice that inside the labs we install the `mitdeeplearning` python package from the Python Package repository: diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..aab82f52 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,21 @@ +[project] +name = "introtodeeplearning" +version = "0.7.5" +license = "MIT" +description = "Lab Materials for MIT 6.S191: Introduction to Deep Learning" +readme = "README.md" +requires-python = ">=3.12,<3.13" +dependencies = [ + "comet-ml>=3.54.0", + "dotenv>=0.9.9", + "matplotlib>=3.10.7", + "mitdeeplearning>=0.7.5", + "opencv-python>=4.12.0.88", + "scipy>=1.16.3", + "torch>=2.9.0", + "torchvision>=0.24.0", +] + +[[tool.uv.index]] +name = "pytorch" +url = "https://download.pytorch.org/whl/cu126" From df153403fc5bd084c2f153c64ca43f692d693aeb Mon Sep 17 00:00:00 2001 From: Jucelio Quentino Date: Fri, 7 Nov 2025 10:17:20 -0300 Subject: [PATCH 2/3] mnt: add deps --- pyproject.toml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index aab82f52..22ac6439 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,13 +7,23 @@ readme = "README.md" requires-python = ">=3.12,<3.13" dependencies = [ "comet-ml>=3.54.0", + "datasets>=2.19.1", "dotenv>=0.9.9", + "gym>=0.26.2", + "lion-pytorch>=0.2.3", "matplotlib>=3.10.7", "mitdeeplearning>=0.7.5", + "numpy>=2.1.2", + "openai>=2.7.1", "opencv-python>=4.12.0.88", +
"opik>=1.8.99", + "peft>=0.17.1", + "regex>=2025.11.3", "scipy>=1.16.3", "torch>=2.9.0", "torchvision>=0.24.0", + "tqdm>=4.66.5", + "transformers>=4.57.1", ] [[tool.uv.index]] From 513914b4185069b0ce95df3b1ac38768ed961d52 Mon Sep 17 00:00:00 2001 From: Jucelio Quentino Date: Fri, 7 Nov 2025 10:29:25 -0300 Subject: [PATCH 3/3] docs: clarify section on running labs locally with uv for PyTorch --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 78ec280b..8d1f1fef 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ On this Github repo, navigate to the lab folder you want to run (`lab1`, `lab2`, ## Running the labs Now, to run the labs, open the Jupyter notebook on Colab. Navigate to the "Runtime" tab --> "Change runtime type". In the pop-up window, under "Runtime type" select "Python 3", and under "Hardware accelerator" select "GPU". Go through the notebooks and fill in the `#TODO` cells to get the code to compile for yourself! -## Running the labs locally with uv +## Running the labs locally with uv (only for PyTorch labs) If you prefer to run the notebooks locally you can use the project's `uv` environment tool (used in this repo). The basic workflow is: