Skip to content
This repository was archived by the owner on Aug 28, 2025. It is now read-only.

Commit 1025d51

Browse files
CI: case excluding CPU (#158)
* skip pip for a dry run * cleaning downloads * adding gpu ipynb * unzip progress * devel requirements Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent e0c48eb commit 1025d51

File tree

10 files changed

+139
-36
lines changed

10 files changed

+139
-36
lines changed

.actions/assistant.py

Lines changed: 39 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,8 @@
2020
PATH_REQ_DEFAULT = os.path.join(_PATH_ROOT, "requirements", "default.txt")
2121
PATH_SCRIPT_RENDER = os.path.join(_PATH_HERE, "_ipynb-render.sh")
2222
PATH_SCRIPT_TEST = os.path.join(_PATH_HERE, "_ipynb-test.sh")
23+
# https://askubuntu.com/questions/909918/how-to-show-unzip-progress
24+
UNZIP_PROGRESS_BAR = ' | awk \'BEGIN {ORS=" "} {if(NR%10==0)print "."}\''
2325
REPO_NAME = "lightning-tutorials"
2426
COLAB_REPO_LINK = "https://colab.research.google.com/github/PytorchLightning"
2527
BRANCH_DEFAULT = "main"
@@ -136,8 +138,9 @@ def get_running_torch_version():
136138
class AssistantCLI:
137139
"""Collection of handy CLI commands."""
138140

139-
DEVICE_ACCELERATOR = os.environ.get("ACCELERATOR", "cpu").lower()
140-
DATASET_FOLDER = os.environ.get("PATH_DATASETS", "_datasets").lower()
141+
_LOCAL_ACCELERATOR = "cpu,gpu" if get_running_cuda_version() else "cpu"
142+
DEVICE_ACCELERATOR = os.environ.get("ACCELERATOR", _LOCAL_ACCELERATOR).lower()
143+
DATASETS_FOLDER = os.environ.get("PATH_DATASETS", "_datasets")
141144
DRY_RUN = bool(int(os.environ.get("DRY_RUN", 0)))
142145
_META_REQUIRED_FIELDS = ("title", "author", "license", "description")
143146
_SKIP_DIRS = (
@@ -152,7 +155,7 @@ class AssistantCLI:
152155
)
153156
_META_FILE_REGEX = ".meta.{yaml,yml}"
154157
_META_PIP_KEY = "pip__"
155-
_META_ACCEL_DEFAULT = ("CPU",)
158+
_META_ACCEL_DEFAULT = _LOCAL_ACCELERATOR.split(",")
156159

157160
# Map directory names to tag names. Note that dashes will be replaced with spaces in rendered tags in the docs.
158161
_DIR_TO_TAG = {
@@ -268,16 +271,15 @@ def _parse_requirements(folder: str) -> Tuple[str, str]:
268271

269272
@staticmethod
270273
def _bash_download_data(folder: str) -> List[str]:
271-
"""Generate sequence of commands fro optional downloading dataset specified in the meta file.
274+
"""Generate sequence of commands for optional downloading dataset specified in the meta file.
272275
273276
Args:
274277
folder: path to the folder with python script, meta and artefacts
275278
"""
276-
cmd = ["HERE=$PWD", f"cd {AssistantCLI.DATASET_FOLDER}"]
277279
meta = AssistantCLI._load_meta(folder)
278280
datasets = meta.get("datasets", {})
279281
data_kaggle = datasets.get("kaggle", [])
280-
cmd += [f"python -m kaggle competitions download -c {name}" for name in data_kaggle]
282+
cmd = [f"python -m kaggle competitions download -c {name}" for name in data_kaggle]
281283
files = [f"{name}.zip" for name in data_kaggle]
282284
data_web = datasets.get("web", [])
283285
cmd += [f"wget {web} --progress=bar:force:noscroll --tries=3" for web in data_web]
@@ -287,11 +289,11 @@ def _bash_download_data(folder: str) -> List[str]:
287289
if ext not in AssistantCLI._EXT_ARCHIVE:
288290
continue
289291
if ext in AssistantCLI._EXT_ARCHIVE_ZIP:
290-
cmd += [f"mkdir -p {name}", f"unzip -o {fn} -d {name}"]
292+
cmd += [f"unzip -o {fn} -d {AssistantCLI.DATASETS_FOLDER}/{name} {UNZIP_PROGRESS_BAR}"]
291293
else:
292294
cmd += [f"tar -zxvf {fn} --overwrite"]
293295
cmd += [f"rm {fn}"]
294-
cmd += ["ls -l", "cd $HERE"]
296+
cmd += [f"tree -L 2 {AssistantCLI.DATASETS_FOLDER}"]
295297
return cmd
296298

297299
@staticmethod
@@ -310,26 +312,29 @@ def bash_render(folder: str, output_file: str = PATH_SCRIPT_RENDER) -> Optional[
310312
cmd += AssistantCLI._bash_download_data(folder)
311313
ipynb_file, meta_file, thumb_file = AssistantCLI._valid_folder(folder, ext=".ipynb")
312314
pub_ipynb = os.path.join(DIR_NOTEBOOKS, f"{folder}.ipynb")
315+
pub_meta = pub_ipynb.replace(".ipynb", ".yaml")
313316
pub_dir = os.path.dirname(pub_ipynb)
314317
thumb_ext = os.path.splitext(thumb_file)[-1] if thumb_file else "."
315318
pub_thumb = os.path.join(DIR_NOTEBOOKS, f"{folder}{thumb_ext}") if thumb_file else ""
316319
cmd.append(f"mkdir -p {pub_dir}")
317-
pip_req, pip_args = AssistantCLI._parse_requirements(folder)
318-
cmd += [f"pip install {pip_req} {pip_args}", "pip list"]
319320
if AssistantCLI.DRY_RUN:
320321
# dry run does not execute the notebooks just takes them as they are
321322
cmd.append(f"cp {ipynb_file} {pub_ipynb}")
323+
# copy and add meta config
324+
cmd += [f"cp {meta_file} {pub_meta}", f"cat {pub_meta}", f"git add {pub_meta}"]
322325
else:
326+
pip_req, pip_args = AssistantCLI._parse_requirements(folder)
327+
cmd += [f"pip install {pip_req} --quiet {pip_args}", "pip list"]
323328
cmd.append(f"# available: {AssistantCLI.DEVICE_ACCELERATOR}\n")
324329
if AssistantCLI._valid_accelerator(folder):
325330
cmd.append(f"python -m papermill {ipynb_file} {pub_ipynb} --kernel python")
326331
else:
327332
warn("Invalid notebook's accelerator for this device. So no outputs will be generated.", RuntimeWarning)
328333
cmd.append(f"cp {ipynb_file} {pub_ipynb}")
329-
# Export the actual packages used in runtime
330-
cmd.append(f"meta_file=$(python .actions/assistant.py update-env-details {folder})")
331-
# copy and add to version the enriched meta config
332-
cmd += ["echo $meta_file", "cat $meta_file", "git add $meta_file"]
334+
# Export the actual packages used in runtime
335+
cmd.append(f"meta_file=$(python .actions/assistant.py update-env-details {folder})")
336+
# copy and add to version the enriched meta config
337+
cmd += ["echo $meta_file", "cat $meta_file", "git add $meta_file"]
333338
# if thumb image is linked to the notebook, copy and version it too
334339
if thumb_file:
335340
cmd += [f"cp {thumb_file} {pub_thumb}", f"git add {pub_thumb}"]
@@ -353,26 +358,36 @@ def bash_test(folder: str, output_file: str = PATH_SCRIPT_TEST) -> Optional[str]
353358
"""
354359
cmd = list(AssistantCLI._BASH_SCRIPT_BASE) + [f"# Testing: {folder}"]
355360
cmd += AssistantCLI._bash_download_data(folder)
356-
ipynb_file, _, _ = AssistantCLI._valid_folder(folder, ext=".ipynb")
361+
ipynb_file, meta_file, _ = AssistantCLI._valid_folder(folder, ext=".ipynb")
357362

358363
# prepare isolated environment with inheriting the global packages
364+
path_venv = os.path.join(folder, "venv")
359365
cmd += [
360-
f"python -m virtualenv --system-site-packages {os.path.join(folder, 'venv')}",
361-
f"source {os.path.join(folder, 'venv', 'bin', 'activate')}",
366+
f"python -m virtualenv --system-site-packages {path_venv}",
367+
f"source {os.path.join(path_venv, 'bin', 'activate')}",
362368
"pip --version",
363369
]
364-
# and install specific packages
365-
pip_req, pip_args = AssistantCLI._parse_requirements(folder)
366-
cmd += [f"pip install {pip_req} {pip_args}", "pip list"]
367-
# Export the actual packages used in runtime
368-
cmd.append(f"meta_file=$(python .actions/assistant.py update-env-details {folder} --base_path .)")
369-
# show created meta config
370-
cmd += ["echo $meta_file", "cat $meta_file"]
371370

372371
cmd.append(f"# available: {AssistantCLI.DEVICE_ACCELERATOR}")
373372
if AssistantCLI._valid_accelerator(folder):
373+
# and install specific packages
374+
pip_req, pip_args = AssistantCLI._parse_requirements(folder)
375+
cmd += [f"pip install {pip_req} --quiet {pip_args}", "pip list"]
376+
# Export the actual packages used in runtime
377+
cmd.append(f"meta_file=$(python .actions/assistant.py update-env-details {folder} --base_path .)")
378+
# show created meta config
379+
cmd += ["echo $meta_file", "cat $meta_file"]
374380
cmd.append(f"python -m pytest {ipynb_file} -v --nbval --nbval-cell-timeout=300")
375381
else:
382+
pub_ipynb = os.path.join(DIR_NOTEBOOKS, f"{folder}.ipynb")
383+
pub_meta = pub_ipynb.replace(".ipynb", ".yaml")
384+
# copy and add meta config
385+
cmd += [
386+
f"mkdir -p {os.path.dirname(pub_meta)}",
387+
f"cp {meta_file} {pub_meta}",
388+
f"cat {pub_meta}",
389+
f"git add {pub_meta}",
390+
]
376391
warn("Invalid notebook's accelerator for this device. So no tests will be run!!!", RuntimeWarning)
377392
# deactivate and clean local environment
378393
cmd += ["deactivate", f"rm -rf {os.path.join(folder, 'venv')}"]

.azure/ipynb-publish.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@ jobs:
6161
6262
- bash: |
6363
set -e
64+
sudo apt install -y tree
6465
pip --version
6566
pip install --requirement requirements.txt
6667
pip install --requirement requirements/data.txt

.azure/ipynb-tests.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@ jobs:
4141
4242
- bash: |
4343
set -e
44+
sudo apt install -y tree
4445
pip --version
4546
pip install --requirement requirements.txt
4647
pip install --requirement requirements/data.txt

.github/workflows/ci_testing.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@ jobs:
1919
# Timeout: https://stackoverflow.com/a/59076067/4521646
2020
timeout-minutes: 55
2121
env:
22-
ACCELERATOR: CPU
2322
PATH_DATASETS: ${{ github.workspace }}/.datasets
2423

2524
steps:
@@ -75,6 +74,7 @@ jobs:
7574
KAGGLE_KEY: ${{ secrets.KAGGLE_KEY }}
7675
run: |
7776
set -e
77+
sudo apt install -y tree
7878
while IFS= read -r line; do
7979
python .actions/assistant.py bash-test $line
8080
cat .actions/_ipynb-test.sh

.pre-commit-config.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,9 @@ repos:
1414
- id: end-of-file-fixer
1515
- id: trailing-whitespace
1616
- id: check-case-conflict
17+
- id: check-json
1718
- id: check-yaml
1819
- id: check-toml
19-
- id: pretty-format-json
2020
- id: check-added-large-files
2121
args: ['--maxkb=250', '--enforce-all']
2222
- id: check-docstring-first
@@ -26,7 +26,7 @@ repos:
2626
rev: v2.31.1
2727
hooks:
2828
- id: pyupgrade
29-
args: [--py36-plus]
29+
args: [--py37-plus]
3030
name: Upgrade code
3131

3232
- repo: https://github.com/myint/docformatter

requirements/default.txt

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
ipython[notebook]
2-
pytorch-lightning>=1.3
3-
torchmetrics>=0.3
2+
pytorch-lightning>=1.4
3+
torchmetrics>=0.6
44
torch>=1.6, <1.9

requirements/devel.txt

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,5 @@
1-
virtualenv
2-
jupytext # converting
1+
virtualenv>=20.10
2+
jupytext>=1.10 # converting
33
pytest>=6.0
4-
nbval # testing
4+
nbval>=0.9.6 # testing
55
papermill>=2.3.4 # render
6-
black
7-
flake8
8-
isort

templates/img-classify/.meta.yml

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
title: Simple image classification with Lightning Flash
2+
author: PL team
3+
created: 2022-04-14
4+
updated: 2021-06-16
5+
license: CC BY-SA
6+
build: 0
7+
tags:
8+
- Image
9+
description: |
10+
This is a template showing a simple image classification case for when an accelerator is required.
11+
requirements:
12+
- lightning-flash[image]>=0.7
13+
- pandas>=1.0
14+
- matplotlib>=3.0
15+
- seaborn
16+
accelerator:
17+
- GPU
18+
datasets:
19+
web:
20+
- https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip

templates/img-classify/classify.py

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
# %%
2+
import os
3+
4+
import flash
5+
import matplotlib.pyplot as plt
6+
import pandas as pd
7+
import seaborn as sn
8+
from flash.image import ImageClassificationData, ImageClassifier
9+
from pytorch_lightning.loggers import CSVLogger
10+
11+
PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")
12+
# this dataset is automatically downloaded and extracted based on meta link
13+
# this archive includes one more level - a folder with the same name
14+
DATA_HYMENOPLERA = os.path.join(PATH_DATASETS, "hymenoptera_data", "hymenoptera_data")
15+
16+
# %% [markdown]
17+
# ## 1. Create the DataModule
18+
19+
# %%
20+
datamodule = ImageClassificationData.from_folders(
21+
train_folder=f"{DATA_HYMENOPLERA}/train/",
22+
val_folder=f"{DATA_HYMENOPLERA}/val/",
23+
batch_size=1024,
24+
)
25+
26+
# %% [markdown]
27+
# ## 2. Build the task
28+
29+
# %%
30+
model = ImageClassifier(backbone="resnet18", labels=datamodule.labels)
31+
32+
# %% [markdown]
33+
# ## 3. Create the trainer and finetune the model
34+
35+
# %%
36+
logger = CSVLogger(save_dir="logs/")
37+
trainer = flash.Trainer(logger=logger, max_epochs=3, gpus=1)
38+
trainer.finetune(model, datamodule=datamodule, strategy="freeze")
39+
40+
# %%
41+
metrics = pd.read_csv(f"{trainer.logger.log_dir}/metrics.csv")
42+
del metrics["step"]
43+
metrics.set_index("epoch", inplace=True)
44+
print(metrics.dropna(axis=1, how="all").head())
45+
46+
g = sn.relplot(data=metrics, kind="line")
47+
plt.gcf().set_size_inches(12, 4)
48+
plt.grid()
49+
50+
# %% [markdown]
51+
# ## 4. Predict what's on a few images! ants or bees?
52+
53+
# %%
54+
datamodule = ImageClassificationData.from_files(
55+
predict_files=[
56+
f"{DATA_HYMENOPLERA}/val/bees/65038344_52a45d090d.jpg",
57+
f"{DATA_HYMENOPLERA}/val/bees/590318879_68cf112861.jpg",
58+
f"{DATA_HYMENOPLERA}/val/ants/540543309_ddbb193ee5.jpg",
59+
],
60+
batch_size=3,
61+
)
62+
predictions = trainer.predict(model, datamodule=datamodule, output="labels")
63+
print(predictions)
64+
65+
# %% [markdown]
66+
# ## 5. Save the model!
67+
68+
# %%
69+
trainer.save_checkpoint("image_classification_model.pt")

templates/simple/.meta.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ author: PL team
33
created: 2021-06-15
44
updated: 2021-06-17
55
license: CC
6-
build: 8
6+
build: 9
77
description: |
88
This is a template to show how to contribute a tutorial.
99
requirements:

0 commit comments

Comments
 (0)