42 | 42 | "colab": { |
43 | 43 | "base_uri": "https://localhost:8080/" |
44 | 44 | }, |
45 | | - "id": "T4DNH1aoeEhE", |
46 | | - "outputId": "5304f679-8132-4cc3-c50a-c73502c7d44c" |
| 45 | + "id": "eOJLveJqtEO3", |
| 46 | + "outputId": "067a74b2-c5df-464d-a3fa-3f4517a9090a" |
47 | 47 | }, |
48 | 48 | "source": [ |
49 | | - "!pip install wget" |
| 49 | + "# To install only the requirements of this notebook, run this cell\n", |
| 50 | + "\n", |
| 51 | + "# ===========================\n", |
| 52 | + "\n", |
| 53 | + "!pip install numpy==1.19.5\n", |
| 54 | + "!pip install wget==3.2\n", |
| 55 | + "!pip install tensorflow==1.14.0\n", |
| 56 | + "\n", |
| 57 | + "# ===========================" |
50 | 58 | ], |
51 | 59 | "execution_count": 1, |
52 | 60 | "outputs": [ |
53 | 61 | { |
54 | 62 | "output_type": "stream", |
55 | 63 | "text": [ |
56 | | - "Collecting wget\n", |
| 64 | + "Requirement already satisfied: numpy==1.19.5 in /usr/local/lib/python3.7/dist-packages (1.19.5)\n", |
| 65 | + "Collecting wget==3.2\n", |
57 | 66 | " Downloading https://files.pythonhosted.org/packages/47/6a/62e288da7bcda82b935ff0c6cfe542970f04e29c756b0e147251b2fb251f/wget-3.2.zip\n", |
58 | 67 | "Building wheels for collected packages: wget\n", |
59 | 68 | " Building wheel for wget (setup.py) ... \u001b[?25l\u001b[?25hdone\n", |
60 | | - " Created wheel for wget: filename=wget-3.2-cp37-none-any.whl size=9675 sha256=ff413eaabaf2d63367837f0f054d0f5b2518684987287143f3db7927f5fdd75e\n", |
| 69 | + " Created wheel for wget: filename=wget-3.2-cp37-none-any.whl size=9675 sha256=0590de33e3a5654cc81a0a21cf66fa3e8af32bf31e65c5a543d101b6d3fba858\n", |
61 | 70 | " Stored in directory: /root/.cache/pip/wheels/40/15/30/7d8f7cea2902b4db79e3fea550d7d7b85ecb27ef992b618f3f\n", |
62 | 71 | "Successfully built wget\n", |
63 | 72 | "Installing collected packages: wget\n", |
64 | | - "Successfully installed wget-3.2\n" |
| 73 | + "Successfully installed wget-3.2\n", |
| 74 | + "Collecting tensorflow==1.14.0\n", |
| 75 | + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/f4/28/96efba1a516cdacc2e2d6d081f699c001d414cc8ca3250e6d59ae657eb2b/tensorflow-1.14.0-cp37-cp37m-manylinux1_x86_64.whl (109.3MB)\n", |
| 76 | + "\u001b[K |████████████████████████████████| 109.3MB 104kB/s \n", |
| 77 | + "\u001b[?25hRequirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (1.12.1)\n", |
| 78 | + "Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (1.34.1)\n", |
| 79 | + "Collecting tensorflow-estimator<1.15.0rc0,>=1.14.0rc0\n", |
| 80 | + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/3c/d5/21860a5b11caf0678fbc8319341b0ae21a07156911132e0e71bffed0510d/tensorflow_estimator-1.14.0-py2.py3-none-any.whl (488kB)\n", |
| 81 | + "\u001b[K |████████████████████████████████| 491kB 49.6MB/s \n", |
| 82 | + "\u001b[?25hRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (1.1.0)\n", |
| 83 | + "Requirement already satisfied: numpy<2.0,>=1.14.5 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (1.19.5)\n", |
| 84 | + "Collecting tensorboard<1.15.0,>=1.14.0\n", |
| 85 | + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/91/2d/2ed263449a078cd9c8a9ba50ebd50123adf1f8cfbea1492f9084169b89d9/tensorboard-1.14.0-py3-none-any.whl (3.1MB)\n", |
| 86 | + "\u001b[K |████████████████████████████████| 3.2MB 33.6MB/s \n", |
| 87 | + "\u001b[?25hRequirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (1.1.2)\n", |
| 88 | + "Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (0.8.1)\n", |
| 89 | + "Requirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (0.2.0)\n", |
| 90 | + "Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (0.12.0)\n", |
| 91 | + "Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (1.15.0)\n", |
| 92 | + "Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (3.17.3)\n", |
| 93 | + "Collecting keras-applications>=1.0.6\n", |
| 94 | + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/71/e3/19762fdfc62877ae9102edf6342d71b28fbfd9dea3d2f96a882ce099b03f/Keras_Applications-1.0.8-py3-none-any.whl (50kB)\n", |
| 95 | + "\u001b[K |████████████████████████████████| 51kB 8.4MB/s \n", |
| 96 | + "\u001b[?25hRequirement already satisfied: gast>=0.2.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (0.4.0)\n", |
| 97 | + "Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.14.0) (0.36.2)\n", |
| 98 | + "Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow==1.14.0) (57.0.0)\n", |
| 99 | + "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow==1.14.0) (3.3.4)\n", |
| 100 | + "Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow==1.14.0) (1.0.1)\n", |
| 101 | + "Requirement already satisfied: h5py in /usr/local/lib/python3.7/dist-packages (from keras-applications>=1.0.6->tensorflow==1.14.0) (3.1.0)\n", |
| 102 | + "Requirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard<1.15.0,>=1.14.0->tensorflow==1.14.0) (4.6.1)\n", |
| 103 | + "Requirement already satisfied: cached-property; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from h5py->keras-applications>=1.0.6->tensorflow==1.14.0) (1.5.2)\n", |
| 104 | + "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<1.15.0,>=1.14.0->tensorflow==1.14.0) (3.5.0)\n", |
| 105 | + "Requirement already satisfied: typing-extensions>=3.6.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<1.15.0,>=1.14.0->tensorflow==1.14.0) (3.7.4.3)\n", |
| 106 | + "\u001b[31mERROR: kapre 0.3.5 has requirement tensorflow>=2.0.0, but you'll have tensorflow 1.14.0 which is incompatible.\u001b[0m\n", |
| 107 | + "Installing collected packages: tensorflow-estimator, tensorboard, keras-applications, tensorflow\n", |
| 108 | + " Found existing installation: tensorflow-estimator 2.5.0\n", |
| 109 | + " Uninstalling tensorflow-estimator-2.5.0:\n", |
| 110 | + " Successfully uninstalled tensorflow-estimator-2.5.0\n", |
| 111 | + " Found existing installation: tensorboard 2.5.0\n", |
| 112 | + " Uninstalling tensorboard-2.5.0:\n", |
| 113 | + " Successfully uninstalled tensorboard-2.5.0\n", |
| 114 | + " Found existing installation: tensorflow 2.5.0\n", |
| 115 | + " Uninstalling tensorflow-2.5.0:\n", |
| 116 | + " Successfully uninstalled tensorflow-2.5.0\n", |
| 117 | + "Successfully installed keras-applications-1.0.8 tensorboard-1.14.0 tensorflow-1.14.0 tensorflow-estimator-1.14.0\n" |
65 | 118 | ], |
66 | 119 | "name": "stdout" |
67 | 120 | } |
68 | 121 | ] |
69 | 122 | }, |
| 123 | + { |
| 124 | + "cell_type": "code", |
| 125 | + "metadata": { |
| 126 | + "id": "Ixb_5zcYtEO5" |
| 127 | + }, |
| 128 | + "source": [ |
| 129 | + "# To install the requirements for the entire chapter, uncomment the lines below and run this cell\n", |
| 130 | + "\n", |
| 131 | + "# ===========================\n", |
| 132 | + "\n", |
| 133 | + "# try:\n", |
| 134 | + "# import google.colab\n", |
| 135 | + "# !curl https://raw.githubusercontent.com/practical-nlp/practical-nlp/master/Ch4/ch4-requirements.txt | xargs -n 1 -L 1 pip install\n", |
| 136 | + "# except ModuleNotFoundError:\n", |
| 137 | + "# !pip install -r \"ch4-requirements.txt\"\n", |
| 138 | + "\n", |
| 139 | + "# ===========================" |
| 140 | + ], |
| 141 | + "execution_count": 2, |
| 142 | + "outputs": [] |
| 143 | + }, |
70 | 144 | { |
71 | 145 | "cell_type": "code", |
72 | 146 | "metadata": { |
|
90 | 164 | "from tensorflow.keras.models import Model, Sequential\n", |
91 | 165 | "from tensorflow.keras.initializers import Constant" |
92 | 166 | ], |
93 | | - "execution_count": 2, |
| 167 | + "execution_count": 3, |
94 | 168 | "outputs": [] |
95 | 169 | }, |
96 | 170 | { |
|
125 | 199 | "except ModuleNotFoundError:\n", |
126 | 200 | " \n", |
127 | 201 | " if not os.path.exists('Data/glove.6B'):\n", |
128 | | - " os.mkdir('/Data/glove.6B')\n", |
| 202 | + " os.mkdir('Data/glove.6B')\n", |
129 | 203 | " \n", |
130 | 204 | " url='http://nlp.stanford.edu/data/glove.6B.zip' \n", |
131 | 205 | " wget.download(url,'Data') \n", |
|
140 | 214 | " if not os.path.exists('Data/aclImdb'):\n", |
141 | 215 | " \n", |
142 | 216 | " url='http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz' \n", |
143 | | - " wget.download(url,path)\n", |
| 217 | + " wget.download(url,'Data')\n", |
144 | 218 | " \n", |
145 | 219 | " temp='Data/aclImdb_v1.tar.gz' \n", |
146 | 220 | " tar = tarfile.open(temp, \"r:gz\")\n", |
147 | | - " tar.extractall(path) \n", |
| 221 | + " tar.extractall('Data') \n", |
148 | 222 | " tar.close()\n", |
149 | 223 | " \n", |
150 | 224 | " BASE_DIR = 'Data'" |
151 | 225 | ], |
152 | | - "execution_count": 3, |
| 226 | + "execution_count": 4, |
153 | 227 | "outputs": [] |
154 | 228 | }, |
155 | 229 | { |
|
162 | 236 | "TRAIN_DATA_DIR = os.path.join(BASE_DIR, 'aclImdb/train')\n", |
163 | 237 | "TEST_DATA_DIR = os.path.join(BASE_DIR, 'aclImdb/test')" |
164 | 238 | ], |
165 | | - "execution_count": 4, |
| 239 | + "execution_count": 5, |
166 | 240 | "outputs": [] |
167 | 241 | }, |
168 | 242 | { |
|
180 | 254 | "#started off from: https://github.com/keras-team/keras/blob/master/examples/pretrained_word_embeddings.py\n", |
181 | 255 | "#and from: https://github.com/keras-team/keras/blob/master/examples/imdb_lstm.py" |
182 | 256 | ], |
183 | | - "execution_count": 5, |
| 257 | + "execution_count": 6, |
184 | 258 | "outputs": [] |
185 | 259 | }, |
186 | 260 | { |
|
226 | 300 | "#print(test_texts[24999])\n", |
227 | 301 | "#print(test_labels[24999])" |
228 | 302 | ], |
229 | | - "execution_count": 6, |
| 303 | + "execution_count": 7, |
230 | 304 | "outputs": [] |
231 | 305 | }, |
232 | 306 | { |
|
236 | 310 | "base_uri": "https://localhost:8080/" |
237 | 311 | }, |
238 | 312 | "id": "QhhqM0Jdd7fs", |
239 | | - "outputId": "87fa1f87-4a45-4921-b04f-3cec24e6a0c3" |
| 313 | + "outputId": "9b5b394e-bc52-4779-d85d-a0383446051d" |
240 | 314 | }, |
241 | 315 | "source": [ |
242 | 316 | "#Vectorize these text samples into a 2D integer tensor using Keras Tokenizer \n", |
|
248 | 322 | "word_index = tokenizer.word_index \n", |
249 | 323 | "print('Found %s unique tokens.' % len(word_index))" |
250 | 324 | ], |
251 | | - "execution_count": 7, |
| 325 | + "execution_count": 8, |
252 | 326 | "outputs": [ |
253 | 327 | { |
254 | 328 | "output_type": "stream", |
|
266 | 340 | "base_uri": "https://localhost:8080/" |
267 | 341 | }, |
268 | 342 | "id": "_e0V1-bBb5_d", |
269 | | - "outputId": "349db20b-de34-450a-8053-be6aef52f790" |
| 343 | + "outputId": "d866429d-5bb6-43a7-c66e-ed5abbafc4cd" |
270 | 344 | }, |
271 | 345 | "source": [ |
272 | 346 | "#Converting this to sequences to be fed into neural network. Max seq. len is 1000 as set earlier\n", |
|
289 | 363 | "#This is the data we will use for CNN and RNN training\n", |
290 | 364 | "print('Splitting the train data into train and valid is done')" |
291 | 365 | ], |
292 | | - "execution_count": 8, |
| 366 | + "execution_count": 9, |
293 | 367 | "outputs": [ |
294 | 368 | { |
295 | 369 | "output_type": "stream", |
|
307 | 381 | "base_uri": "https://localhost:8080/" |
308 | 382 | }, |
309 | 383 | "id": "WUHqg2vvb5_l", |
310 | | - "outputId": "295331a8-15a4-45de-9177-a88478639ff2" |
| 384 | + "outputId": "8387eda1-18f0-4254-9819-e63191b8fc04" |
311 | 385 | }, |
312 | 386 | "source": [ |
313 | 387 | "print('Preparing embedding matrix.')\n", |
|
345 | 419 | " trainable=False)\n", |
346 | 420 | "print(\"Preparing of embedding matrix is done\")" |
347 | 421 | ], |
348 | | - "execution_count": 9, |
| 422 | + "execution_count": 10, |
349 | 423 | "outputs": [ |
350 | 424 | { |
351 | 425 | "output_type": "stream", |
|
374 | 448 | "base_uri": "https://localhost:8080/" |
375 | 449 | }, |
376 | 450 | "id": "TTY-4K-Ob5_t", |
377 | | - "outputId": "3f795778-06c6-41c5-a3f4-f2273016b020" |
| 451 | + "outputId": "836681ca-936e-400a-8973-0754759bb7cd" |
378 | 452 | }, |
379 | 453 | "source": [ |
380 | 454 | "print('Define a 1D CNN model.')\n", |
|
401 | 475 | "score, acc = cnnmodel.evaluate(test_data, test_labels)\n", |
402 | 476 | "print('Test accuracy with CNN:', acc)" |
403 | 477 | ], |
404 | | - "execution_count": 10, |
| 478 | + "execution_count": 11, |
405 | 479 | "outputs": [ |
406 | 480 | { |
407 | 481 | "output_type": "stream", |
408 | 482 | "text": [ |
409 | 483 | "Define a 1D CNN model.\n", |
410 | | - "157/157 [==============================] - 23s 41ms/step - loss: 0.6748 - acc: 0.6068 - val_loss: 0.5651 - val_acc: 0.7040\n", |
411 | | - "782/782 [==============================] - 5s 7ms/step - loss: 0.5735 - acc: 0.7004\n", |
412 | | - "Test accuracy with CNN: 0.7003999948501587\n" |
| 484 | + "WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n", |
| 485 | + "Instructions for updating:\n", |
| 486 | + "Call initializer instance with the dtype argument instead of passing it to the constructor\n", |
| 487 | + "Train on 20000 samples, validate on 5000 samples\n", |
| 488 | + "20000/20000 [==============================] - 156s 8ms/sample - loss: 0.6706 - acc: 0.5972 - val_loss: 0.5116 - val_acc: 0.7512\n", |
| 489 | + "25000/25000 [==============================] - 67s 3ms/sample - loss: 0.5239 - acc: 0.7415\n", |
| 490 | + "Test accuracy with CNN: 0.74152\n" |
413 | 491 | ], |
414 | 492 | "name": "stdout" |
415 | 493 | } |
|
431 | 509 | "base_uri": "https://localhost:8080/" |
432 | 510 | }, |
433 | 511 | "id": "zI0bISwRb5_w", |
434 | | - "outputId": "f34c3d27-a2fc-4996-fd11-5027d2bc170a" |
| 512 | + "outputId": "d7697504-dacb-415c-b131-b89d6b10c771" |
435 | 513 | }, |
436 | 514 | "source": [ |
437 | 515 | "print(\"Defining and training a CNN model, training embedding layer on the fly instead of using pre-trained embeddings\")\n", |
|
457 | 535 | "score, acc = cnnmodel.evaluate(test_data, test_labels)\n", |
458 | 536 | "print('Test accuracy with CNN:', acc)" |
459 | 537 | ], |
460 | | - "execution_count": 11, |
| 538 | + "execution_count": 12, |
461 | 539 | "outputs": [ |
462 | 540 | { |
463 | 541 | "output_type": "stream", |
464 | 542 | "text": [ |
465 | 543 | "Defining and training a CNN model, training embedding layer on the fly instead of using pre-trained embeddings\n", |
466 | | - "157/157 [==============================] - 14s 82ms/step - loss: 0.5217 - acc: 0.7037 - val_loss: 0.3567 - val_acc: 0.8542\n", |
467 | | - "782/782 [==============================] - 6s 8ms/step - loss: 0.3553 - acc: 0.8464\n", |
468 | | - "Test accuracy with CNN: 0.8463600277900696\n" |
| 544 | + "WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/initializers.py:119: calling RandomUniform.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n", |
| 545 | + "Instructions for updating:\n", |
| 546 | + "Call initializer instance with the dtype argument instead of passing it to the constructor\n", |
| 547 | + "Train on 20000 samples, validate on 5000 samples\n", |
| 548 | + "20000/20000 [==============================] - 234s 12ms/sample - loss: 0.5323 - acc: 0.6927 - val_loss: 0.3179 - val_acc: 0.8644\n", |
| 549 | + "25000/25000 [==============================] - 84s 3ms/sample - loss: 0.3409 - acc: 0.8495\n", |
| 550 | + "Test accuracy with CNN: 0.84948\n" |
469 | 551 | ], |
470 | 552 | "name": "stdout" |
471 | 553 | } |
|
487 | 569 | "base_uri": "https://localhost:8080/" |
488 | 570 | }, |
489 | 571 | "id": "SvBt2Brib5_4", |
490 | | - "outputId": "08bdb448-ee92-4a7c-d65c-0259730cf340" |
| 572 | + "outputId": "008fe9fa-13bf-4127-ba46-67916426ddbe" |
491 | 573 | }, |
492 | 574 | "source": [ |
493 | 575 | "print(\"Defining and training an LSTM model, training embedding layer on the fly\")\n", |
|
510 | 592 | " batch_size=32)\n", |
511 | 593 | "print('Test accuracy with RNN:', acc)" |
512 | 594 | ], |
513 | | - "execution_count": 12, |
| 595 | + "execution_count": 13, |
514 | 596 | "outputs": [ |
515 | 597 | { |
516 | 598 | "output_type": "stream", |
517 | 599 | "text": [ |
518 | 600 | "Defining and training an LSTM model, training embedding layer on the fly\n", |
519 | | - "WARNING:tensorflow:Layer lstm will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.\n", |
| 601 | + "WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/nn_impl.py:180: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n", |
| 602 | + "Instructions for updating:\n", |
| 603 | + "Use tf.where in 2.0, which has the same broadcast rule as np.where\n", |
520 | 604 | "Training the RNN\n", |
521 | | - "625/625 [==============================] - 1343s 2s/step - loss: 0.4818 - accuracy: 0.7686 - val_loss: 0.3970 - val_accuracy: 0.8374\n", |
522 | | - "782/782 [==============================] - 219s 280ms/step - loss: 0.3980 - accuracy: 0.8356\n", |
523 | | - "Test accuracy with RNN: 0.8356000185012817\n" |
| 605 | + "Train on 20000 samples, validate on 5000 samples\n", |
| 606 | + "20000/20000 [==============================] - 1365s 68ms/sample - loss: 0.4997 - acc: 0.7506 - val_loss: 0.3839 - val_acc: 0.8403\n", |
| 607 | + "25000/25000 [==============================] - 198s 8ms/sample - loss: 0.3962 - acc: 0.8300\n", |
| 608 | + "Test accuracy with RNN: 0.82998\n" |
524 | 609 | ], |
525 | 610 | "name": "stdout" |
526 | 611 | } |
|
542 | 627 | "base_uri": "https://localhost:8080/" |
543 | 628 | }, |
544 | 629 | "id": "Eymx0IyCb5_-", |
545 | | - "outputId": "8bdfbaa9-51b9-4fcf-aae8-c3460a7df533" |
| 630 | + "outputId": "da0fa303-a4c4-4b92-ff42-54f1a1d51e45" |
546 | 631 | }, |
547 | 632 | "source": [ |
548 | 633 | "print(\"Defining and training an LSTM model, using pre-trained embedding layer\")\n", |
|
564 | 649 | " batch_size=32)\n", |
565 | 650 | "print('Test accuracy with RNN:', acc)" |
566 | 651 | ], |
567 | | - "execution_count": 13, |
| 652 | + "execution_count": 14, |
568 | 653 | "outputs": [ |
569 | 654 | { |
570 | 655 | "output_type": "stream", |
571 | 656 | "text": [ |
572 | 657 | "Defining and training an LSTM model, using pre-trained embedding layer\n", |
573 | | - "WARNING:tensorflow:Layer lstm_1 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.\n", |
574 | 658 | "Training the RNN\n", |
575 | | - "625/625 [==============================] - 1151s 2s/step - loss: 0.6257 - accuracy: 0.6520 - val_loss: 0.5683 - val_accuracy: 0.7186\n", |
576 | | - "782/782 [==============================] - 218s 279ms/step - loss: 0.5713 - accuracy: 0.7166\n", |
577 | | - "Test accuracy with RNN: 0.7166399955749512\n" |
| 659 | + "Train on 20000 samples, validate on 5000 samples\n", |
| 660 | + "20000/20000 [==============================] - 1156s 58ms/sample - loss: 0.6122 - acc: 0.6602 - val_loss: 0.4538 - val_acc: 0.8017\n", |
| 661 | + "25000/25000 [==============================] - 200s 8ms/sample - loss: 0.4666 - acc: 0.7930\n", |
| 662 | + "Test accuracy with RNN: 0.793\n" |
578 | 663 | ], |
579 | 664 | "name": "stdout" |
580 | 665 | } |
|