Commit 36b5cb7

[Ch4Nb05] Installed dependencies using ch4-requirements.txt
1 parent ec1d2a6 commit 36b5cb7

File tree

1 file changed (+127, -42 lines)

Ch4/05_DeepNN_Example.ipynb

Lines changed: 127 additions & 42 deletions
@@ -42,31 +42,105 @@
 "colab": {
 "base_uri": "https://localhost:8080/"
 },
-"id": "T4DNH1aoeEhE",
-"outputId": "5304f679-8132-4cc3-c50a-c73502c7d44c"
+"id": "eOJLveJqtEO3",
+"outputId": "067a74b2-c5df-464d-a3fa-3f4517a9090a"
 },
 "source": [
-"!pip install wget"
+"# To install only the requirements of this notebook, uncomment the lines below and run this cell\n",
+"\n",
+"# ===========================\n",
+"\n",
+"!pip install numpy==1.19.5\n",
+"!pip install wget==3.2\n",
+"!pip install tensorflow==1.14.0\n",
+"\n",
+"# ==========================="
 ],
 "execution_count": 1,
 "outputs": [
 {
 "output_type": "stream",
 "text": [
-"Collecting wget\n",
+"Requirement already satisfied: numpy==1.19.5 in /usr/local/lib/python3.7/dist-packages (1.19.5)\n",
+"Collecting wget==3.2\n",
 " Downloading https://files.pythonhosted.org/packages/47/6a/62e288da7bcda82b935ff0c6cfe542970f04e29c756b0e147251b2fb251f/wget-3.2.zip\n",
 "Building wheels for collected packages: wget\n",
 " Building wheel for wget (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
-" Created wheel for wget: filename=wget-3.2-cp37-none-any.whl size=9675 sha256=ff413eaabaf2d63367837f0f054d0f5b2518684987287143f3db7927f5fdd75e\n",
+" Created wheel for wget: filename=wget-3.2-cp37-none-any.whl size=9675 sha256=0590de33e3a5654cc81a0a21cf66fa3e8af32bf31e65c5a543d101b6d3fba858\n",
 " Stored in directory: /root/.cache/pip/wheels/40/15/30/7d8f7cea2902b4db79e3fea550d7d7b85ecb27ef992b618f3f\n",
 "Successfully built wget\n",
 "Installing collected packages: wget\n",
-"Successfully installed wget-3.2\n"
+"Successfully installed wget-3.2\n",
+"Collecting tensorflow==1.14.0\n",
+"\u001b[?25l Downloading https://files.pythonhosted.org/packages/f4/28/96efba1a516cdacc2e2d6d081f699c001d414cc8ca3250e6d59ae657eb2b/tensorflow-1.14.0-cp37-cp37m-manylinux1_x86_64.whl (109.3MB)\n",
+"\u001b[K |████████████████████████████████| 109.3MB 104kB/s \n",
+"\u001b[?25hRequirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (1.12.1)\n",
+"Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (1.34.1)\n",
+"Collecting tensorflow-estimator<1.15.0rc0,>=1.14.0rc0\n",
+"\u001b[?25l Downloading https://files.pythonhosted.org/packages/3c/d5/21860a5b11caf0678fbc8319341b0ae21a07156911132e0e71bffed0510d/tensorflow_estimator-1.14.0-py2.py3-none-any.whl (488kB)\n",
+"\u001b[K |████████████████████████████████| 491kB 49.6MB/s \n",
+"\u001b[?25hRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (1.1.0)\n",
+"Requirement already satisfied: numpy<2.0,>=1.14.5 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (1.19.5)\n",
+"Collecting tensorboard<1.15.0,>=1.14.0\n",
+"\u001b[?25l Downloading https://files.pythonhosted.org/packages/91/2d/2ed263449a078cd9c8a9ba50ebd50123adf1f8cfbea1492f9084169b89d9/tensorboard-1.14.0-py3-none-any.whl (3.1MB)\n",
+"\u001b[K |████████████████████████████████| 3.2MB 33.6MB/s \n",
+"\u001b[?25hRequirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (1.1.2)\n",
+"Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (0.8.1)\n",
+"Requirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (0.2.0)\n",
+"Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (0.12.0)\n",
+"Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (1.15.0)\n",
+"Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (3.17.3)\n",
+"Collecting keras-applications>=1.0.6\n",
+"\u001b[?25l Downloading https://files.pythonhosted.org/packages/71/e3/19762fdfc62877ae9102edf6342d71b28fbfd9dea3d2f96a882ce099b03f/Keras_Applications-1.0.8-py3-none-any.whl (50kB)\n",
+"\u001b[K |████████████████████████████████| 51kB 8.4MB/s \n",
+"\u001b[?25hRequirement already satisfied: gast>=0.2.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (0.4.0)\n",
+"Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (0.36.2)\n",
+"Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow==1.14.0) (57.0.0)\n",
+"Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow==1.14.0) (3.3.4)\n",
+"Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow==1.14.0) (1.0.1)\n",
+"Requirement already satisfied: h5py in /usr/local/lib/python3.7/dist-packages (from keras-applications>=1.0.6->tensorflow==1.14.0) (3.1.0)\n",
+"Requirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard<1.15.0,>=1.14.0->tensorflow==1.14.0) (4.6.1)\n",
+"Requirement already satisfied: cached-property; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from h5py->keras-applications>=1.0.6->tensorflow==1.14.0) (1.5.2)\n",
+"Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<1.15.0,>=1.14.0->tensorflow==1.14.0) (3.5.0)\n",
+"Requirement already satisfied: typing-extensions>=3.6.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<1.15.0,>=1.14.0->tensorflow==1.14.0) (3.7.4.3)\n",
+"\u001b[31mERROR: kapre 0.3.5 has requirement tensorflow>=2.0.0, but you'll have tensorflow 1.14.0 which is incompatible.\u001b[0m\n",
+"Installing collected packages: tensorflow-estimator, tensorboard, keras-applications, tensorflow\n",
+" Found existing installation: tensorflow-estimator 2.5.0\n",
+" Uninstalling tensorflow-estimator-2.5.0:\n",
+" Successfully uninstalled tensorflow-estimator-2.5.0\n",
+" Found existing installation: tensorboard 2.5.0\n",
+" Uninstalling tensorboard-2.5.0:\n",
+" Successfully uninstalled tensorboard-2.5.0\n",
+" Found existing installation: tensorflow 2.5.0\n",
+" Uninstalling tensorflow-2.5.0:\n",
+" Successfully uninstalled tensorflow-2.5.0\n",
+"Successfully installed keras-applications-1.0.8 tensorboard-1.14.0 tensorflow-1.14.0 tensorflow-estimator-1.14.0\n"
 ],
 "name": "stdout"
 }
 ]
 },
+{
+"cell_type": "code",
+"metadata": {
+"id": "Ixb_5zcYtEO5"
+},
+"source": [
+"# To install the requirements for the entire chapter, uncomment the lines below and run this cell\n",
+"\n",
+"# ===========================\n",
+"\n",
+"# try:\n",
+"#     import google.colab\n",
+"#     !curl https://raw.githubusercontent.com/practical-nlp/practical-nlp/master/Ch4/ch4-requirements.txt | xargs -n 1 -L 1 pip install\n",
+"# except ModuleNotFoundError:\n",
+"#     !pip install -r \"ch4-requirements.txt\"\n",
+"\n",
+"# ==========================="
+],
+"execution_count": 2,
+"outputs": []
+},
 {
 "cell_type": "code",
 "metadata": {
@@ -90,7 +164,7 @@
 "from tensorflow.keras.models import Model, Sequential\n",
 "from tensorflow.keras.initializers import Constant"
 ],
-"execution_count": 2,
+"execution_count": 3,
 "outputs": []
 },
 {
@@ -125,7 +199,7 @@
 "except ModuleNotFoundError:\n",
 "    \n",
 "    if not os.path.exists('Data/glove.6B'):\n",
-"        os.mkdir('/Data/glove.6B')\n",
+"        os.mkdir('Data/glove.6B')\n",
 "    \n",
 "    url='http://nlp.stanford.edu/data/glove.6B.zip' \n",
 "    wget.download(url,'Data') \n",
@@ -140,16 +214,16 @@
 "    if not os.path.exists('Data/aclImdb'):\n",
 "    \n",
 "        url='http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz' \n",
-"        wget.download(url,path)\n",
+"        wget.download(url,'Data')\n",
 "    \n",
 "        temp='Data/aclImdb_v1.tar.gz' \n",
 "        tar = tarfile.open(temp, \"r:gz\")\n",
-"        tar.extractall(path) \n",
+"        tar.extractall('Data') \n",
 "        tar.close()\n",
 "    \n",
 "    BASE_DIR = 'Data'"
 ],
-"execution_count": 3,
+"execution_count": 4,
 "outputs": []
 },
 {
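
These two download hunks replace an absolute '/Data/glove.6B' and an undefined path variable with the relative 'Data' directory, so GloVe and the IMDB archive both land next to the notebook. A short sketch of the corrected flow under those assumptions; DATA_DIR and IMDB_URL are illustrative names, and wget is the PyPI wget==3.2 package pinned in the first cell:

    import os
    import tarfile
    import wget  # PyPI `wget` package (3.2), not the CLI tool

    DATA_DIR = 'Data'   # illustrative constant; the notebook inlines 'Data'
    IMDB_URL = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'

    os.makedirs(DATA_DIR, exist_ok=True)
    if not os.path.exists(os.path.join(DATA_DIR, 'aclImdb')):
        wget.download(IMDB_URL, DATA_DIR)      # archive saved into Data/
        archive = os.path.join(DATA_DIR, 'aclImdb_v1.tar.gz')
        with tarfile.open(archive, 'r:gz') as tar:
            tar.extractall(DATA_DIR)           # unpacks to Data/aclImdb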
@@ -162,7 +236,7 @@
 "TRAIN_DATA_DIR = os.path.join(BASE_DIR, 'aclImdb/train')\n",
 "TEST_DATA_DIR = os.path.join(BASE_DIR, 'aclImdb/test')"
 ],
-"execution_count": 4,
+"execution_count": 5,
 "outputs": []
 },
 {
@@ -180,7 +254,7 @@
 "#started off from: https://github.com/keras-team/keras/blob/master/examples/pretrained_word_embeddings.py\n",
 "#and from: https://github.com/keras-team/keras/blob/master/examples/imdb_lstm.py"
 ],
-"execution_count": 5,
+"execution_count": 6,
 "outputs": []
 },
 {
@@ -226,7 +300,7 @@
 "#print(test_texts[24999])\n",
 "#print(test_labels[24999])"
 ],
-"execution_count": 6,
+"execution_count": 7,
 "outputs": []
 },
 {
@@ -236,7 +310,7 @@
 "base_uri": "https://localhost:8080/"
 },
 "id": "QhhqM0Jdd7fs",
-"outputId": "87fa1f87-4a45-4921-b04f-3cec24e6a0c3"
+"outputId": "9b5b394e-bc52-4779-d85d-a0383446051d"
 },
 "source": [
 "#Vectorize these text samples into a 2D integer tensor using Keras Tokenizer \n",
@@ -248,7 +322,7 @@
 "word_index = tokenizer.word_index \n",
 "print('Found %s unique tokens.' % len(word_index))"
 ],
-"execution_count": 7,
+"execution_count": 8,
 "outputs": [
 {
 "output_type": "stream",
@@ -266,7 +340,7 @@
 "base_uri": "https://localhost:8080/"
 },
 "id": "_e0V1-bBb5_d",
-"outputId": "349db20b-de34-450a-8053-be6aef52f790"
+"outputId": "d866429d-5bb6-43a7-c66e-ed5abbafc4cd"
 },
 "source": [
 "#Converting this to sequences to be fed into neural network. Max seq. len is 1000 as set earlier\n",
@@ -289,7 +363,7 @@
 "#This is the data we will use for CNN and RNN training\n",
 "print('Splitting the train data into train and valid is done')"
 ],
-"execution_count": 8,
+"execution_count": 9,
 "outputs": [
 {
 "output_type": "stream",
@@ -307,7 +381,7 @@
 "base_uri": "https://localhost:8080/"
 },
 "id": "WUHqg2vvb5_l",
-"outputId": "295331a8-15a4-45de-9177-a88478639ff2"
+"outputId": "8387eda1-18f0-4254-9819-e63191b8fc04"
 },
 "source": [
 "print('Preparing embedding matrix.')\n",
@@ -345,7 +419,7 @@
 " trainable=False)\n",
 "print(\"Preparing of embedding matrix is done\")"
 ],
-"execution_count": 9,
+"execution_count": 10,
 "outputs": [
 {
 "output_type": "stream",
@@ -374,7 +448,7 @@
 "base_uri": "https://localhost:8080/"
 },
 "id": "TTY-4K-Ob5_t",
-"outputId": "3f795778-06c6-41c5-a3f4-f2273016b020"
+"outputId": "836681ca-936e-400a-8973-0754759bb7cd"
 },
 "source": [
 "print('Define a 1D CNN model.')\n",
@@ -401,15 +475,19 @@
 "score, acc = cnnmodel.evaluate(test_data, test_labels)\n",
 "print('Test accuracy with CNN:', acc)"
 ],
-"execution_count": 10,
+"execution_count": 11,
 "outputs": [
 {
 "output_type": "stream",
 "text": [
 "Define a 1D CNN model.\n",
-"157/157 [==============================] - 23s 41ms/step - loss: 0.6748 - acc: 0.6068 - val_loss: 0.5651 - val_acc: 0.7040\n",
-"782/782 [==============================] - 5s 7ms/step - loss: 0.5735 - acc: 0.7004\n",
-"Test accuracy with CNN: 0.7003999948501587\n"
+"WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
+"Instructions for updating:\n",
+"Call initializer instance with the dtype argument instead of passing it to the constructor\n",
+"Train on 20000 samples, validate on 5000 samples\n",
+"20000/20000 [==============================] - 156s 8ms/sample - loss: 0.6706 - acc: 0.5972 - val_loss: 0.5116 - val_acc: 0.7512\n",
+"25000/25000 [==============================] - 67s 3ms/sample - loss: 0.5239 - acc: 0.7415\n",
+"Test accuracy with CNN: 0.74152\n"
 ],
 "name": "stdout"
 }
@@ -431,7 +509,7 @@
 "base_uri": "https://localhost:8080/"
 },
 "id": "zI0bISwRb5_w",
-"outputId": "f34c3d27-a2fc-4996-fd11-5027d2bc170a"
+"outputId": "d7697504-dacb-415c-b131-b89d6b10c771"
 },
 "source": [
 "print(\"Defining and training a CNN model, training embedding layer on the fly instead of using pre-trained embeddings\")\n",
@@ -457,15 +535,19 @@
 "score, acc = cnnmodel.evaluate(test_data, test_labels)\n",
 "print('Test accuracy with CNN:', acc)"
 ],
-"execution_count": 11,
+"execution_count": 12,
 "outputs": [
 {
 "output_type": "stream",
 "text": [
 "Defining and training a CNN model, training embedding layer on the fly instead of using pre-trained embeddings\n",
-"157/157 [==============================] - 14s 82ms/step - loss: 0.5217 - acc: 0.7037 - val_loss: 0.3567 - val_acc: 0.8542\n",
-"782/782 [==============================] - 6s 8ms/step - loss: 0.3553 - acc: 0.8464\n",
-"Test accuracy with CNN: 0.8463600277900696\n"
+"WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/initializers.py:119: calling RandomUniform.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
+"Instructions for updating:\n",
+"Call initializer instance with the dtype argument instead of passing it to the constructor\n",
+"Train on 20000 samples, validate on 5000 samples\n",
+"20000/20000 [==============================] - 234s 12ms/sample - loss: 0.5323 - acc: 0.6927 - val_loss: 0.3179 - val_acc: 0.8644\n",
+"25000/25000 [==============================] - 84s 3ms/sample - loss: 0.3409 - acc: 0.8495\n",
+"Test accuracy with CNN: 0.84948\n"
 ],
 "name": "stdout"
 }
@@ -487,7 +569,7 @@
 "base_uri": "https://localhost:8080/"
 },
 "id": "SvBt2Brib5_4",
-"outputId": "08bdb448-ee92-4a7c-d65c-0259730cf340"
+"outputId": "008fe9fa-13bf-4127-ba46-67916426ddbe"
 },
 "source": [
 "print(\"Defining and training an LSTM model, training embedding layer on the fly\")\n",
@@ -510,17 +592,20 @@
 " batch_size=32)\n",
 "print('Test accuracy with RNN:', acc)"
 ],
-"execution_count": 12,
+"execution_count": 13,
 "outputs": [
 {
 "output_type": "stream",
 "text": [
 "Defining and training an LSTM model, training embedding layer on the fly\n",
-"WARNING:tensorflow:Layer lstm will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.\n",
+"WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/nn_impl.py:180: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n",
+"Instructions for updating:\n",
+"Use tf.where in 2.0, which has the same broadcast rule as np.where\n",
 "Training the RNN\n",
-"625/625 [==============================] - 1343s 2s/step - loss: 0.4818 - accuracy: 0.7686 - val_loss: 0.3970 - val_accuracy: 0.8374\n",
-"782/782 [==============================] - 219s 280ms/step - loss: 0.3980 - accuracy: 0.8356\n",
-"Test accuracy with RNN: 0.8356000185012817\n"
+"Train on 20000 samples, validate on 5000 samples\n",
+"20000/20000 [==============================] - 1365s 68ms/sample - loss: 0.4997 - acc: 0.7506 - val_loss: 0.3839 - val_acc: 0.8403\n",
+"25000/25000 [==============================] - 198s 8ms/sample - loss: 0.3962 - acc: 0.8300\n",
+"Test accuracy with RNN: 0.82998\n"
 ],
 "name": "stdout"
 }
@@ -542,7 +627,7 @@
 "base_uri": "https://localhost:8080/"
 },
 "id": "Eymx0IyCb5_-",
-"outputId": "8bdfbaa9-51b9-4fcf-aae8-c3460a7df533"
+"outputId": "da0fa303-a4c4-4b92-ff42-54f1a1d51e45"
 },
 "source": [
 "print(\"Defining and training an LSTM model, using pre-trained embedding layer\")\n",
@@ -564,17 +649,17 @@
 " batch_size=32)\n",
 "print('Test accuracy with RNN:', acc)"
 ],
-"execution_count": 13,
+"execution_count": 14,
 "outputs": [
 {
 "output_type": "stream",
 "text": [
 "Defining and training an LSTM model, using pre-trained embedding layer\n",
-"WARNING:tensorflow:Layer lstm_1 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.\n",
 "Training the RNN\n",
-"625/625 [==============================] - 1151s 2s/step - loss: 0.6257 - accuracy: 0.6520 - val_loss: 0.5683 - val_accuracy: 0.7186\n",
-"782/782 [==============================] - 218s 279ms/step - loss: 0.5713 - accuracy: 0.7166\n",
-"Test accuracy with RNN: 0.7166399955749512\n"
+"Train on 20000 samples, validate on 5000 samples\n",
+"20000/20000 [==============================] - 1156s 58ms/sample - loss: 0.6122 - acc: 0.6602 - val_loss: 0.4538 - val_acc: 0.8017\n",
+"25000/25000 [==============================] - 200s 8ms/sample - loss: 0.4666 - acc: 0.7930\n",
+"Test accuracy with RNN: 0.793\n"
 ],
 "name": "stdout"
 }
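
Once the pinned installs above have run (the kapre incompatibility warning in the first cell's output is expected when downgrading below TensorFlow 2.0), a quick runtime check can confirm the versions match the pins before executing the rest of the notebook. This snippet is an illustrative addition, not part of the commit:

    # Illustrative post-install check; not part of the commit.
    import numpy
    import tensorflow

    assert numpy.__version__ == "1.19.5"
    assert tensorflow.__version__ == "1.14.0"
    print("numpy", numpy.__version__, "| tensorflow", tensorflow.__version__)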
