Commit c0c519b

fix check error due to missing cuda device from github test env
1 parent 06613a1 commit c0c519b

File tree

4 files changed: +66 additions, −65 deletions
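
Every file in the commit follows the same pattern: choose the accelerator at runtime instead of hard-coding "cuda", and, in the GPU-only benchmark notebooks, stop execution up front when no CUDA device exists (as on the GitHub test runners). Below is a minimal standalone sketch of that pattern; it assumes the usual `lightning.pytorch` alias for `pl`, and the `Trainer` call is illustrative rather than a quote of any one cell.

import torch
import lightning.pytorch as pl  # assumed alias; the notebooks call pl.Trainer(...)

# Fall back to CPU when no CUDA device is available (e.g. on GitHub CI runners).
device_str = "cuda" if torch.cuda.is_available() else "cpu"


class StopExecution(Exception):
    """Abort the notebook early without printing a stack trace."""

    def _render_traceback_(self):
        return []


# The GPU-only benchmark notebooks abort here instead of silently benchmarking on CPU:
# if device_str != "cuda":
#     print("CUDA not available. Aborting notebook execution.")
#     raise StopExecution

# Elsewhere the chosen device string is passed straight to Lightning.
trainer = pl.Trainer(accelerator=device_str, gradient_clip_val=0.1)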

docs/source/tutorials/precompute_sample_usage.ipynb

Lines changed: 14 additions & 12 deletions
@@ -42,7 +42,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "id": "5268f19a",
 "metadata": {},
 "outputs": [],
@@ -70,7 +70,9 @@
 "torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = True\n",
 "# torch.backends.cudnn.benchmark = False\n",
 "torch.backends.cuda.matmul.allow_tf32 = True\n",
-"# torch.backends.cudnn.deterministic = True"
+"# torch.backends.cudnn.deterministic = True\n",
+"\n",
+"device_str = \"cuda\" if torch.cuda.is_available() else \"cpu\""
 ]
 },
 {
@@ -332,7 +334,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": null,
 "id": "3008d01f",
 "metadata": {},
 "outputs": [
@@ -1158,12 +1160,12 @@
 ")\n",
 "\n",
 "baseline_predictions = Baseline().predict(\n",
-"    val_dataloader, trainer_kwargs=dict(accelerator=\"cuda\"), return_y=True\n",
+"    val_dataloader, trainer_kwargs=dict(accelerator=device_str), return_y=True\n",
 ")\n",
 "SMAPE()(baseline_predictions.output, baseline_predictions.y)\n",
 "\n",
 "pl.seed_everything(42)\n",
-"trainer = pl.Trainer(accelerator=\"cuda\", gradient_clip_val=0.1)\n",
+"trainer = pl.Trainer(accelerator=device_str, gradient_clip_val=0.1)\n",
 "net = NBeats.from_dataset(\n",
 "    training,\n",
 "    learning_rate=3e-2,\n",
@@ -1173,7 +1175,7 @@
 ")\n",
 "\n",
 "pl.seed_everything(42)\n",
-"trainer = pl.Trainer(accelerator=\"cuda\", gradient_clip_val=0.1)\n",
+"trainer = pl.Trainer(accelerator=device_str, gradient_clip_val=0.1)\n",
 "net = NBeats.from_dataset(\n",
 "    training,\n",
 "    learning_rate=3e-2,\n",
@@ -1204,7 +1206,7 @@
 "trainer = pl.Trainer(\n",
 "    logger=False,\n",
 "    max_epochs=100,\n",
-"    accelerator=\"cuda\",\n",
+"    accelerator=device_str,\n",
 "    enable_model_summary=True,\n",
 "    gradient_clip_val=0.1,\n",
 "    callbacks=[early_stop_callback],\n",
@@ -1319,7 +1321,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": null,
 "id": "ec533c49",
 "metadata": {},
 "outputs": [
@@ -2030,12 +2032,12 @@
 ")\n",
 "\n",
 "baseline_predictions = Baseline().predict(\n",
-"    val_dataloader, trainer_kwargs=dict(accelerator=\"cuda\"), return_y=True\n",
+"    val_dataloader, trainer_kwargs=dict(accelerator=device_str), return_y=True\n",
 ")\n",
 "SMAPE()(baseline_predictions.output, baseline_predictions.y)\n",
 "\n",
 "pl.seed_everything(42)\n",
-"trainer = pl.Trainer(accelerator=\"cuda\", gradient_clip_val=0.1)\n",
+"trainer = pl.Trainer(accelerator=device_str, gradient_clip_val=0.1)\n",
 "net = NBeats.from_dataset(\n",
 "    training,\n",
 "    learning_rate=3e-2,\n",
@@ -2045,7 +2047,7 @@
 ")\n",
 "\n",
 "pl.seed_everything(42)\n",
-"trainer = pl.Trainer(accelerator=\"cuda\", gradient_clip_val=0.1)\n",
+"trainer = pl.Trainer(accelerator=device_str, gradient_clip_val=0.1)\n",
 "net = NBeats.from_dataset(\n",
 "    training,\n",
 "    learning_rate=3e-2,\n",
@@ -2076,7 +2078,7 @@
 "trainer = pl.Trainer(\n",
 "    logger=False,\n",
 "    max_epochs=100,\n",
-"    accelerator=\"cuda\",\n",
+"    accelerator=device_str,\n",
 "    enable_model_summary=True,\n",
 "    gradient_clip_val=0.1,\n",
 "    callbacks=[early_stop_callback],\n",

examples/precompute_bench_cpu.ipynb

Lines changed: 22 additions & 49 deletions
@@ -31,7 +31,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "id": "5268f19a",
 "metadata": {},
 "outputs": [],
@@ -52,14 +52,7 @@
 "from pytorch_forecasting.data.examples import generate_ar_data\n",
 "from pytorch_forecasting.metrics import MAE, SMAPE, MQF2DistributionLoss, QuantileLoss\n",
 "\n",
-"torch.set_float32_matmul_precision(\"medium\")\n",
-"torch.backends.cuda.matmul.allow_tf32 = True\n",
-"torch.backends.cudnn.enabled = True\n",
-"torch.backends.cudnn.allow_tf32 = True\n",
-"torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = True\n",
-"# torch.backends.cudnn.benchmark = False\n",
-"torch.backends.cuda.matmul.allow_tf32 = True\n",
-"# torch.backends.cudnn.deterministic = True"
+"torch.set_float32_matmul_precision(\"medium\")"
 ]
 },
 {
@@ -275,14 +268,12 @@
 "\n",
 "completion_time = end_time - start_time\n",
 "\n",
-"print(f\"Time: {completion_time:.6f} seconds\")\n",
-"\n",
-"torch.cuda.empty_cache()"
+"print(f\"Time: {completion_time:.6f} seconds\")"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": null,
 "id": "b1a183e2",
 "metadata": {},
 "outputs": [
@@ -302,13 +293,12 @@
 "baseline_predictions = Baseline().predict(\n",
 "    val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n",
 ")\n",
-"SMAPE()(baseline_predictions.output, baseline_predictions.y)\n",
-"torch.cuda.empty_cache()"
+"SMAPE()(baseline_predictions.output, baseline_predictions.y)"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "id": "2e696d76",
 "metadata": {},
 "outputs": [
@@ -336,9 +326,7 @@
 "    backcast_loss_ratio=0.0,\n",
 "    hidden_size=64,\n",
 "    optimizer=\"AdamW\",\n",
-")\n",
-"\n",
-"torch.cuda.empty_cache()"
+")"
 ]
 },
 {
@@ -351,7 +339,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": null,
 "id": "e1c56874",
 "metadata": {},
 "outputs": [
@@ -424,16 +412,13 @@
 "fig.show()\n",
 "net.hparams.learning_rate = res.suggestion()\n",
 "\n",
-"torch.cuda.synchronize()\n",
 "end_time = time.time()\n",
 "\n",
 "completion_time = end_time - start_time\n",
 "\n",
 "time_hyperopt.append(completion_time)\n",
 "\n",
-"print(f\"Time: {completion_time:.6f} seconds\")\n",
-"\n",
-"torch.cuda.empty_cache()"
+"print(f\"Time: {completion_time:.6f} seconds\")"
 ]
 },
 {
@@ -446,7 +431,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": null,
 "id": "b4bd82cf",
 "metadata": {},
 "outputs": [
@@ -629,16 +614,14 @@
 "    train_dataloaders=train_dataloader,\n",
 "    val_dataloaders=val_dataloader,\n",
 ")\n",
-"torch.cuda.synchronize()\n",
+"\n",
 "end_time = time.time()\n",
 "\n",
 "completion_time = end_time - start_time\n",
 "\n",
 "time_train.append(completion_time)\n",
 "\n",
-"print(f\"Time: {completion_time:.6f} seconds\")\n",
-"\n",
-"torch.cuda.empty_cache()"
+"print(f\"Time: {completion_time:.6f} seconds\")"
 ]
 },
 {
@@ -724,14 +707,12 @@
 "\n",
 "completion_time = end_time - start_time\n",
 "\n",
-"print(f\"Time: {completion_time:.6f} seconds\")\n",
-"\n",
-"torch.cuda.empty_cache()"
+"print(f\"Time: {completion_time:.6f} seconds\")"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": null,
 "id": "294470a5",
 "metadata": {},
 "outputs": [
@@ -751,13 +732,12 @@
 "baseline_predictions = Baseline().predict(\n",
 "    val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n",
 ")\n",
-"SMAPE()(baseline_predictions.output, baseline_predictions.y)\n",
-"torch.cuda.empty_cache()"
+"SMAPE()(baseline_predictions.output, baseline_predictions.y)"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": null,
 "id": "50d6725e",
 "metadata": {},
 "outputs": [
@@ -785,9 +765,7 @@
 "    backcast_loss_ratio=0.0,\n",
 "    hidden_size=64,\n",
 "    optimizer=\"AdamW\",\n",
-")\n",
-"\n",
-"torch.cuda.empty_cache()"
+")"
 ]
 },
 {
@@ -800,7 +778,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 12,
+"execution_count": null,
 "id": "04c933d8",
 "metadata": {},
 "outputs": [
@@ -873,16 +851,13 @@
 "fig.show()\n",
 "net.hparams.learning_rate = res.suggestion()\n",
 "\n",
-"torch.cuda.synchronize()\n",
 "end_time = time.time()\n",
 "\n",
 "completion_time = end_time - start_time\n",
 "\n",
 "time_hyperopt.append(completion_time)\n",
 "\n",
-"print(f\"Time: {completion_time:.6f} seconds\")\n",
-"\n",
-"torch.cuda.empty_cache()"
+"print(f\"Time: {completion_time:.6f} seconds\")"
 ]
 },
 {
@@ -895,7 +870,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 13,
+"execution_count": null,
 "id": "31d3cc8f",
 "metadata": {},
 "outputs": [
@@ -1079,16 +1054,14 @@
 "    train_dataloaders=train_dataloader,\n",
 "    val_dataloaders=val_dataloader,\n",
 ")\n",
-"torch.cuda.synchronize()\n",
+"\n",
 "end_time = time.time()\n",
 "\n",
 "completion_time = end_time - start_time\n",
 "\n",
 "time_train.append(completion_time)\n",
 "\n",
-"print(f\"Time: {completion_time:.6f} seconds\")\n",
-"\n",
-"torch.cuda.empty_cache()"
+"print(f\"Time: {completion_time:.6f} seconds\")"
 ]
 },
 {
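
For the CPU benchmark the commit goes the other way: it drops the CUDA-specific backend flags and the torch.cuda.synchronize() / torch.cuda.empty_cache() calls, so this notebook no longer touches the CUDA runtime at all on machines without a GPU.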

examples/precompute_bench_gpu.ipynb

Lines changed: 15 additions & 2 deletions
@@ -35,11 +35,25 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "id": "5268f19a",
 "metadata": {},
 "outputs": [],
 "source": [
+"import torch\n",
+"\n",
+"device_str = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
+"\n",
+"\n",
+"class StopExecution(Exception):\n",
+"    def _render_traceback_(self):\n",
+"        return []\n",
+"\n",
+"\n",
+"if device_str != \"cuda\":\n",
+"    print(\"CUDA not available. Aborting notebook execution.\")\n",
+"    raise StopExecution\n",
+"\n",
 "import warnings\n",
 "\n",
 "warnings.filterwarnings(\"ignore\")\n",
@@ -49,7 +63,6 @@
 "import matplotlib.pyplot as plt\n",
 "import numpy as np\n",
 "import pandas as pd\n",
-"import torch\n",
 "\n",
 "from pytorch_forecasting import Baseline, NHiTS, TimeSeriesDataSet\n",
 "from pytorch_forecasting.data import NaNLabelEncoder\n",

examples/precompute_bench_gpu_small_data.ipynb

Lines changed: 15 additions & 2 deletions
@@ -35,11 +35,25 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": null,
 "id": "5268f19a",
 "metadata": {},
 "outputs": [],
 "source": [
+"import torch\n",
+"\n",
+"device_str = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
+"\n",
+"\n",
+"class StopExecution(Exception):\n",
+"    def _render_traceback_(self):\n",
+"        return []\n",
+"\n",
+"\n",
+"if device_str != \"cuda\":\n",
+"    print(\"CUDA not available. Aborting notebook execution.\")\n",
+"    raise StopExecution\n",
+"\n",
 "import warnings\n",
 "\n",
 "warnings.filterwarnings(\"ignore\")\n",
@@ -49,7 +63,6 @@
 "import matplotlib.pyplot as plt\n",
 "import numpy as np\n",
 "import pandas as pd\n",
-"import torch\n",
 "\n",
 "from pytorch_forecasting import Baseline, NHiTS, TimeSeriesDataSet\n",
 "from pytorch_forecasting.data import NaNLabelEncoder\n",
