
Commit 9101a91

Merge pull request #832 from mhucka/mhucka-update-nbformat
Update version of nbformat & run notebooks through updated version
2 parents: fa932bf + c83e2be
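Per the description, the notebooks were regenerated by running them back through the updated nbformat. A minimal sketch of what such a re-serialization pass might look like, assuming a plain read/write loop (the actual tooling used for this PR is not shown on this page):

# Hypothetical re-serialization pass; not the script used in the PR.
import glob
import nbformat

for path in glob.glob("docs/tutorials/*.ipynb"):
    nb = nbformat.read(path, as_version=4)  # parse and normalize to notebook format v4
    nbformat.write(nb, path)                # write back with the updated serializer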

File tree

11 files changed: +6211 -6062 lines


docs/tutorials/barren_plateaus.ipynb: 525 additions & 524 deletions (large diff, not rendered by default)
docs/tutorials/gradients.ipynb: 827 additions & 826 deletions (large diff, not rendered by default)
docs/tutorials/hello_many_worlds.ipynb: 1338 additions & 1334 deletions (large diff, not rendered by default)
docs/tutorials/mnist.ipynb: 1137 additions & 1131 deletions (large diff, not rendered by default)
docs/tutorials/noise.ipynb: 834 additions & 803 deletions (large diff, not rendered by default)
docs/tutorials/qcnn.ipynb: 1211 additions & 1210 deletions (large diff, not rendered by default)
docs/tutorials/quantum_data.ipynb: 130 additions & 105 deletions (large diff, not rendered by default)
docs/tutorials/quantum_reinforcement_learning.ipynb: 148 additions & 91 deletions (large diff, not rendered by default)

docs/tutorials/research_tools.ipynb

Lines changed: 59 additions & 36 deletions
@@ -86,22 +86,23 @@
     "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.3 tensorboard_plugin_profile==2.15.0"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": 0,
-   "metadata": {
-    "colab": {},
-    "colab_type": "code",
-    "id": "4Ql5PW-ACO0J"
-   },
-   "outputs": [],
-   "source": [
-    "# Update package resources to account for version changes.\n",
-    "import importlib, pkg_resources\n",
-    "importlib.reload(pkg_resources)"
-   ]
-  },
-  {
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "4Ql5PW-ACO0J"
+   },
+   "outputs": [],
+   "source": [
+    "# Update package resources to account for version changes.\n",
+    "import importlib, pkg_resources\n",
+    "\n",
+    "importlib.reload(pkg_resources)"
+   ]
+  },
+  {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
@@ -159,9 +160,11 @@
     " qubits, depth=2)\n",
     " return random_circuit\n",
     "\n",
+    "\n",
     "def generate_data(circuit, n_samples):\n",
     " \"\"\"Draw n_samples samples from circuit into a tf.Tensor.\"\"\"\n",
-    " return tf.squeeze(tfq.layers.Sample()(circuit, repetitions=n_samples).to_tensor())"
+    " return tf.squeeze(tfq.layers.Sample()(circuit,\n",
+    " repetitions=n_samples).to_tensor())"
    ]
   },
   {
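As an aside, the generate_data function in this hunk wraps tfq.layers.Sample, which draws repeated measurement samples from a circuit as a ragged tensor. A minimal, self-contained usage sketch (the circuit and sizes are illustrative, not taken from the tutorial):

# Illustrative use of tfq.layers.Sample; the circuit here is a made-up example.
import cirq
import tensorflow as tf
import tensorflow_quantum as tfq

qubits = cirq.GridQubit.rect(1, 4)
circuit = cirq.Circuit(cirq.H.on_each(*qubits))  # put 4 qubits in superposition

ragged = tfq.layers.Sample()(circuit, repetitions=8)  # RaggedTensor of bitstrings
bits = tf.squeeze(ragged.to_tensor())                 # dense tensor of shape (8, 4)
print(bits)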
@@ -270,16 +273,20 @@
     " \"\"\"Convert tensor of bitstrings to tensor of ints.\"\"\"\n",
     " sigs = tf.constant([1 << i for i in range(N_QUBITS)], dtype=tf.int32)\n",
     " rounded_bits = tf.clip_by_value(tf.math.round(\n",
-    " tf.cast(bits, dtype=tf.dtypes.float32)), clip_value_min=0, clip_value_max=1)\n",
-    " return tf.einsum('jk,k->j', tf.cast(rounded_bits, dtype=tf.dtypes.int32), sigs)\n",
+    " tf.cast(bits, dtype=tf.dtypes.float32)),\n",
+    " clip_value_min=0,\n",
+    " clip_value_max=1)\n",
+    " return tf.einsum('jk,k->j', tf.cast(rounded_bits, dtype=tf.dtypes.int32),\n",
+    " sigs)\n",
+    "\n",
     "\n",
     "@tf.function\n",
     "def xeb_fid(bits):\n",
     " \"\"\"Compute linear XEB fidelity of bitstrings.\"\"\"\n",
     " final_probs = tf.squeeze(\n",
-    " tf.abs(tfq.layers.State()(REFERENCE_CIRCUIT).to_tensor()) ** 2)\n",
+    " tf.abs(tfq.layers.State()(REFERENCE_CIRCUIT).to_tensor())**2)\n",
     " nums = bits_to_ints(bits)\n",
-    " return (2 ** N_QUBITS) * tf.reduce_mean(tf.gather(final_probs, nums)) - 1.0"
+    " return (2**N_QUBITS) * tf.reduce_mean(tf.gather(final_probs, nums)) - 1.0"
    ]
   },
   {
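For reference, the xeb_fid cell reformatted here computes the linear cross-entropy benchmarking fidelity F = 2^N * E[p(x_i)] - 1, where p is the ideal output distribution of REFERENCE_CIRCUIT and x_i are the sampled bitstrings converted to integers. A small NumPy sanity check of the same formula, independent of TensorFlow (all values synthetic):

# Linear XEB sanity check: sampling from a uniform distribution gives F = 0.
import numpy as np

def linear_xeb(ideal_probs, sampled_ints, n_qubits):
    # F = 2^n * E[p(x_i)] - 1, mirroring xeb_fid above.
    return (2**n_qubits) * np.mean(ideal_probs[sampled_ints]) - 1.0

n = 3
probs = np.full(2**n, 1.0 / 2**n)           # uniform ideal distribution
samples = np.random.randint(0, 2**n, 1000)  # synthetic samples
print(linear_xeb(probs, samples, n))        # exactly 0.0 in this case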
@@ -334,6 +341,8 @@
    "outputs": [],
    "source": [
     "LATENT_DIM = 100\n",
+    "\n",
+    "\n",
     "def make_generator_model():\n",
     " \"\"\"Construct generator model.\"\"\"\n",
     " model = tf.keras.Sequential()\n",
@@ -345,6 +354,7 @@
     "\n",
     " return model\n",
     "\n",
+    "\n",
     "def make_discriminator_model():\n",
     " \"\"\"Constrcut discriminator model.\"\"\"\n",
     " model = tf.keras.Sequential()\n",
@@ -387,17 +397,21 @@
    "outputs": [],
    "source": [
     "cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n",
+    "\n",
+    "\n",
     "def discriminator_loss(real_output, fake_output):\n",
     " \"\"\"Compute discriminator loss.\"\"\"\n",
     " real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n",
     " fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n",
     " total_loss = real_loss + fake_loss\n",
     " return total_loss\n",
     "\n",
+    "\n",
     "def generator_loss(fake_output):\n",
     " \"\"\"Compute generator loss.\"\"\"\n",
     " return cross_entropy(tf.ones_like(fake_output), fake_output)\n",
     "\n",
+    "\n",
     "generator_optimizer = tf.keras.optimizers.Adam(1e-4)\n",
     "discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)"
    ]
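The loss cells above are the standard non-saturating GAN losses. A quick behavioral check on made-up logits, with the definitions repeated so the snippet runs on its own:

# Behavioral check of the GAN losses; the logit values are made up.
import tensorflow as tf

cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return real_loss + fake_loss

def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)

real = tf.constant([[4.0]])   # discriminator is confident these are real
fake = tf.constant([[-4.0]])  # discriminator is confident these are fake
print(discriminator_loss(real, fake))  # low loss: discriminator is winning
print(generator_loss(fake))            # high loss: generator is losing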
@@ -410,7 +424,8 @@
   },
   "outputs": [],
   "source": [
-    "BATCH_SIZE=256\n",
+    "BATCH_SIZE = 256\n",
+    "\n",
     "\n",
     "@tf.function\n",
     "def train_step(images):\n",
@@ -425,8 +440,8 @@
     " gen_loss = generator_loss(fake_output)\n",
     " disc_loss = discriminator_loss(real_output, fake_output)\n",
     "\n",
-    " gradients_of_generator = gen_tape.gradient(\n",
-    " gen_loss, generator.trainable_variables)\n",
+    " gradients_of_generator = gen_tape.gradient(gen_loss,\n",
+    " generator.trainable_variables)\n",
     " gradients_of_discriminator = disc_tape.gradient(\n",
     " disc_loss, discriminator.trainable_variables)\n",
     "\n",
@@ -480,29 +495,37 @@
     "def train(dataset, epochs, start_epoch=1):\n",
     " \"\"\"Launch full training run for the given number of epochs.\"\"\"\n",
     " # Log original training distribution.\n",
-    " tf.summary.histogram('Training Distribution', data=bits_to_ints(dataset), step=0)\n",
+    " tf.summary.histogram('Training Distribution',\n",
+    " data=bits_to_ints(dataset),\n",
+    " step=0)\n",
     "\n",
-    " batched_data = tf.data.Dataset.from_tensor_slices(dataset).shuffle(N_SAMPLES).batch(512)\n",
+    " batched_data = tf.data.Dataset.from_tensor_slices(dataset).shuffle(\n",
+    " N_SAMPLES).batch(512)\n",
     " t = time.time()\n",
     " for epoch in range(start_epoch, start_epoch + epochs):\n",
     " for i, image_batch in enumerate(batched_data):\n",
     " # Log batch-wise loss.\n",
     " gl, dl = train_step(image_batch)\n",
-    " tf.summary.scalar(\n",
-    " 'Generator loss', data=gl, step=epoch * len(batched_data) + i)\n",
-    " tf.summary.scalar(\n",
-    " 'Discriminator loss', data=dl, step=epoch * len(batched_data) + i)\n",
+    " tf.summary.scalar('Generator loss',\n",
+    " data=gl,\n",
+    " step=epoch * len(batched_data) + i)\n",
+    " tf.summary.scalar('Discriminator loss',\n",
+    " data=dl,\n",
+    " step=epoch * len(batched_data) + i)\n",
     "\n",
     " # Log full dataset XEB Fidelity and generated distribution.\n",
     " generated_samples = generator(tf.random.normal([N_SAMPLES, 100]))\n",
-    " tf.summary.scalar(\n",
-    " 'Generator XEB Fidelity Estimate', data=xeb_fid(generated_samples), step=epoch)\n",
-    " tf.summary.histogram(\n",
-    " 'Generator distribution', data=bits_to_ints(generated_samples), step=epoch)\n",
+    " tf.summary.scalar('Generator XEB Fidelity Estimate',\n",
+    " data=xeb_fid(generated_samples),\n",
+    " step=epoch)\n",
+    " tf.summary.histogram('Generator distribution',\n",
+    " data=bits_to_ints(generated_samples),\n",
+    " step=epoch)\n",
     " # Log new samples drawn from this particular random circuit.\n",
     " random_new_distribution = generate_data(REFERENCE_CIRCUIT, N_SAMPLES)\n",
-    " tf.summary.histogram(\n",
-    " 'New round of True samples', data=bits_to_ints(random_new_distribution), step=epoch)\n",
+    " tf.summary.histogram('New round of True samples',\n",
+    " data=bits_to_ints(random_new_distribution),\n",
+    " step=epoch)\n",
     "\n",
     " if epoch % 10 == 0:\n",
     " print('Epoch {}, took {}(s)'.format(epoch, time.time() - t))\n",

requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@ cirq-core==1.3.0
 cirq-google==1.3.0
 sympy==1.12
 numpy==1.24.2 # TensorFlow can detect if it was built against other versions.
-nbformat==4.4.0
+nbformat==5.1.3
 pylint==2.4.4
 yapf==0.40.2
 tensorflow==2.15.0
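Since the only dependency change is the nbformat pin (4.4.0 to 5.1.3), a natural follow-up is to confirm every regenerated notebook still validates under the new version. A hypothetical validation sweep, not part of the commit:

# Hypothetical check that the regenerated notebooks validate under nbformat 5.1.3.
import glob
import nbformat

for path in glob.glob("docs/tutorials/*.ipynb"):
    nb = nbformat.read(path, as_version=4)
    nbformat.validate(nb)  # raises nbformat.ValidationError if the schema is violated
    print("OK:", path)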
