From ee33999cada4c2927bf3e906e0c37e6f697d0602 Mon Sep 17 00:00:00 2001
From: fkiwit
Date: Mon, 22 Sep 2025 09:14:35 +0200
Subject: [PATCH 01/21] Add demo on low depth MNIST circuits

---
 .../low_depth_circuits_mnist/circuit.png     | Bin 0 -> 10440 bytes
 .../low_depth_circuits_mnist/so4.png         | Bin 0 -> 3204 bytes
 .../low_depth_circuits_mnist/demo.py         | 610 ++++++++++++++++++
 .../low_depth_circuits_mnist/metadata.json   |  32 +
 .../low_depth_circuits_mnist/requirements.in |  11 +
 5 files changed, 653 insertions(+)
 create mode 100644 _static/demonstration_assets/low_depth_circuits_mnist/circuit.png
 create mode 100644 _static/demonstration_assets/low_depth_circuits_mnist/so4.png
 create mode 100644 demonstrations_v2/low_depth_circuits_mnist/demo.py
 create mode 100644 demonstrations_v2/low_depth_circuits_mnist/metadata.json
 create mode 100644 demonstrations_v2/low_depth_circuits_mnist/requirements.in

diff --git a/_static/demonstration_assets/low_depth_circuits_mnist/circuit.png b/_static/demonstration_assets/low_depth_circuits_mnist/circuit.png
new file mode 100644
index 0000000000000000000000000000000000000000..f2d13ff89f5f04c9375fa040788cce089cec342c
GIT binary patch
literal 10440
[base85-encoded binary image data for circuit.png omitted]
zl*PBf_^7x>CGe~^vLE4p0iPXE-BUFy`(fx%Xs5whJ9+gY^#>Qi8 zP$ip34tM%N1bNIJs0Yq$6f1W~e)1!Af^Y7YZMR?Z+&oE*c{O>W&glS&T(UZWpO|jW ztt?!ra+6jZY1m3rGjKke`F$ovRT{s(K9_5`u^JqlAjCo8zd4T_CH5LN9u04OHSm z#4Q z__XQH`@W9un1+_k02{hUchs8-<*_VBiE@8f03~!;4e+b+vLF;IV+_II&Lb~OjR&6= zYk_5Y3)SHJ@;G1xLHQh?L2 zFU?5;>0aI&>+K~7<|w!8cmcsL4?bMslj>oZtZm z3GTB!mN(c)5ZRfgJTyOh+739)qd4M-@;>G8?`K-SDFtyZHmsS@9rwN&-+%jUH?2*TY-HxxJIsH6^D)`+0HcNa8I1V$4vL@{Ak!nj;w$@0pM@m!xh-XU? z;~4~iP>FDv;RCw&2_r|jfqH2|J&26!Y786=3(yKPe{eHNW!89tawFhc$CbvKf+`y+D>_dV5T-2|&gS|C@t*cAeg^)ghgQR_%T}z-$grK(H@mooa zilBjanfs~zNqK{bzHDLpC31`ArMj7m*$~yXljKXn9HRjkNtX$+V9NhKTgS;S%9z$@ z;lrKwrYpnZe)4mxIC}TXQ!B3u9KLy9tZezRk^U>Yn=5rPeIO=lA`?02c1fr=-(WqK zy}OzBk)Yq1pqXa6=&T~68y~(FSoW)pPIPCU-{jaw?`pAIipF)M?Y)1?Q%$YcI7+<6{R5)uEj~p6#fJZur+@kxj!BW&J^0cDpq?x>u+>M0ani_U=8LeZ-Xe?N)g3 z`12g6d#dMhykDxzo65SS%?DVDY!4Jne~-DGZ_{RL!Aj(xVfS*pQ&CV0mp|{=$#Kuo zp#7j_EFNyx7g5_{8SIP9=BSuGQX6*yzR9X4G^IrLfN z8^he@*|wI27~QyHEgzkx$=5_$^nTM+oH){SyC>WFql-9roTaZNrxhj?@HA~Cf9JLF z%m_<2O^h3fa)aB~QK!V*%%Uy4x}oK`s}y`S@cip+2Q%$zJoDzM{- z_`uXrW{Z$zO2eSGcbAqE-Aw{o+TCQ~{d-1-!B1h3hWFThqE)R%R*C7leC)AAp7);pE;5-6bN_#ru7$+`3MGX`J6m17H$Gl{!v+;KHT#1H!;6cZ z!^181?9tHEi>;_QdgY3dg9A3q*nV|mY#NZX&&wA$F#XCxy*T^9w|CCE;b)r@5axU( zy|bSm*(c&sM~BsN|HMk=j(NH|1KEmNZrFoLoZG>8EQY-UyW_LRv0Va`N$z`Hl*g5p zEVnY)?dCNO0IK%x&b#)X`!aPS`gy6r>I_bxWq0TouSx;5`K0Bg8L14x-`10JNo~4T zYPAZgowM!?-9KW{4+T0u7bK;0dG}crQt@VDq^w;Q8CEu zcx4S_>>rQYGv`r&jL~vBFK{c+lH2xC6Nu+l+7WG{W5TxkIXDf@6|Unb_rrJY#kSIM;6_2`7oU;qR)pY^ z$q&X)Vs)T}G4o{)Ht@6Pc38yXpd)hwbJO~DEk|R+hpst6Yh-ZQS7h&udtG0DW>85G zZTe2JlEzH;du4$NX#aZTtZ3VbAjR?}NF%SZ(^gBXgte;nWa*2qH}E^e%~3|*4S3z% zS^P`f-V40(s27ET4a+HYF31YX9^NJGH&*qo4quec$lHQi$vTMjv6s^mXGVKI=J}aj z<8N3l>$quqKxnhykX*Z8sCnjE88sKmBep4oBk&i$x!+(Rn?Mt*3)}}Hh{^nM@}wI& zHr6Dpz0&~-BAKeU(s2awkBem|zezIaq}7?kPpR@Mph@z2klNC~i!}P-A$-~-6Yjd) zKUJ7VWgN@SN>JGX*UpB(FYZ}&MEntM*QWs59nq|F^z!e&Ma{-d>AlL&ChBUi|rDY|8InKb%s-;Q`R=8mFgTE-mTrl1-o7UwnD&tkyOYl11@z#~ocU z#1-fXClzB)lvB&=ecHkY!Bjs%$ef@2qfeFr@HutxqUs*ru@l0MvbC3V@eIr^N zJUeTMaM{l8>8~`s4fXxNnkb|kx7$5cG&fM#>d9mKgCE{o+JQThf55_CdZ!(@H`x%` ztX&Asa`no|TfRSMkM?`;N|D21uw*^_jXQgTcTQw3_?XW%#!BCgdfNM3EJlsiGJ;Z1 zENkYovt$R(&l;7`4qN|7M`(_Xa8qYciRdQ!ZS+C&On;RpUL7P05v9OLD=5l3GG(-8 z%_F8QR3dshSJM#_ZvzGT)XO(T{B$AdwMI;_5~7u}SsQYWhe>lGu zH?0?_WT0`zh;8yUdoyX15>_5~+S-><`It(I4fvjre0-2PWheU_Ueg=di`1^3|5RZE zwJvQDgY;J%W$;qHx3)&mUGHZQqf+f@%tb@!+rHa6r_7Jl$mu2>^8d89|-mmm1Hto0&sXa2r3^*vf#%fRMxM%RsZetsRX z>K#oCnuxOtFG|atz{e}6PoRX#`cZ;DyIs&o=F~*ba|Z|HZk?Ah0tQ)P%cW}=roE`8 zVCpI;vly+sBM3YjTLf;&!`o4)c#l!$1U$`=+Dan?tAtAYl|AC^sjoxGo-mR1`Q6&8B}oGDJyw=w#-TJKv|h#+-%G?(uTu>2T-!hnhmi^A zO0;+Qj;!*un6wgJ`5u%*^m=^6yTXHzMs<+>A++3k*5qMS3p}6)riycB+-t&Pf34cI z^da2JH*Fa`H~tDWlq`pIQU4*|vsGP>f}P6$5-uGMxB z*`4{&2nsAM67+lG+$)Ant%dK2_YTYKDXUx9zo>C5GcsYgWg%$^`j^lopoDR>N?v8; zy5oBFkDQt>_Y!n=z0^4zTp)<-c?$t}Z$>`p+2{L?aG=^E$D{D7!@ zv4`GD$O^4Ju({WTSig;EAXQL>{i1~JXiQj_{^>^k*9Ij7vQgvF^1QUS!YnDcRB4L` z?ALx?{)BzTM9azJ4Kn~q_sYZ5C*d*sjZtgw=Ac#l9ghCmqyLyV z2vz9%(qkNQrH#&XE#Y@VRdnS7{Yg7FcqQ&PP%%*`43^&!sg^Hvc#hwymfoQMxnaIi zeF;d{sj+|7P^oqJ?z6N3^yW_#A68E*a``YG$Uk!VQo2nj6n#{uP3}?Ii(ekmvtkB~ zvUd&q?pUNb3@;Z;R(x3TTSHcgM9{TVNpA|;2z1@rS3Lurc{`b8R|4#ram7SLL;#Ac3w z_}%e<-$^7duDhO3o-uVzWALTq$?km=GrQ7^oGGrp4I21RaBz@GBt4qG&)>zH-&iJA zZ#TT1K%qsT3mbN$ki;;QLQ72u@|PCNyj4F^78c$}B!f~iGRD<6q`oeHX}rXVLLhEA zy=6a4kjEw+F_j&qy%`c`u(^1*PMu$jSO1Wb4=<~W?=w6Ot;gCcFt5sfe^Mu{8sgG> zlON4X|ZLg-9lP4L2kT;EeeZ>BY#Po>H<7l)Wg{5(xmCNhCV( zBWz#kpFaC4jc}EMDXdw2< zrw~p-LMgcoCP^fDsPPattHYJa#N{(qO3C-&S&76OHD+_O+LW> zOtC<_GUm2Ns8a~`kj|nkLl}x?PB5*8>^ki 
z>Q!AeYcH;0UFK9d+((>(@sFy13AUuZu3h)K-Sz&#t-7?q-|ZXP#~&C{T4v^bZ#bqX z;nsJqOPMmcqEGDARv;DeR+g!+h}MbfvTZ!d+|_b5@tm3hZ{0w8bsxHJWQQ^PHjY`3 z{aS7zxdf0xLr7tE$Ipae3t(<$Ze?O-ZDMA=f4jMzm4%(9nUR^9otatvPyFWp0it6= zBg2#a9boMBIEMiS|Ls7Ej1G$@g+#~wJ5v-8Rf#cri%eW-xSd<%zOZ=5*o2tS$e0Mn z*mHoj%{Ciz+jAD?+sv)ajj+*`__] develops an efficient algorithm for finding low-depth +quantum circuits to load classical image data as quantum states. + +This demo gives an introduction to the paper **“Typical Machine Learning Datasets as +Low‑Depth Quantum Circuits”** (2025). We will discuss the following three steps: 1) Quantum image +states, 2) Low-depth image circuits, 3) Training a small variational‑quantum‑circuit (VQC) +classifier on the dataset. +""" + +###################################################################### +# 1. Quantum image states +# ~~~~~~~~~~~~~~~~~~~~~~~ +# +# Images with :math:`2^n` pixels are mapped to states of the form +# :math:`\left| \psi(\mathbf x) \right> = \frac{1}{\sqrt{2^{n}}}\sum_{j=0}^{2^{n}-1} \left| c(x_j) \right> \otimes \left| j \right>`, +# where the **address register** :math:`\left| j\right>` holds the pixel position (:math:`n` qubits), +# and additional **color qubits** :math:`\left| c(x_j)\right>` encode the pixel intensities. For +# grayscale images, we use the *flexible representation of quantum images (FRQI)* +# [`2 <#References>`__,\ `3 <#References>`__] as an encoding. In this case, the data value +# :math:`{x}_j` of each pixel is just a single number corresponding to the grayscale value of that +# pixel. We can encode this information in the :math:`z`-polarization of an additional color qubit as +# :math:`\left|c({x}_j)\right> = \cos({\textstyle\frac{\pi}{2}} {x}_j) \left| 0 \right> + \sin({\textstyle\frac{\pi}{2}} {x}_j) \left| 1 \right>`, +# with the pixel value normalized to :math:`{x}_j \in [0,1]`. Thus, a grayscale image with :math:`2^n` +# pixels is encoded into a quantum state with :math:`n+1` qubits. +# +# For color images, the *multi-channel representation of quantum images (MCRQI)* +# [`4 <#References>`__,\ `5 <#References>`__] can be used. Python implementations of the MCRQI +# encoding and decoding are provided at the end of this demo and are discussed in Ref. +# [`1 <#References>`__]. +# + +from pennylane import numpy as np + +# Grayscale encodings and decodings + + +def FRQI_encoding(images): + """ + Input : (batchsize, N, N) ndarray + A batch of square arrays representing grayscale images. + Returns : (batchsize, 2, N**2) ndarray + A batch of quantum states encoding the grayscale images using the FRQI. + """ + # get image size and number of qubits + batchsize, N, _ = images.shape + n = 2 * int(np.log2(N)) + # reorder pixels hierarchically + states = np.reshape(images, (batchsize, *(2,) * n)) + states = np.transpose( + states, [0] + [ax + 1 for q in range(n // 2) for ax in (q, q + n // 2)] + ) + # FRQI encoding by stacking cos and sin components + states = np.stack([np.cos(np.pi / 2 * states), np.sin(np.pi / 2 * states)], axis=1) + # normalize and reshape + states = np.reshape(states, (batchsize, 2, N**2)) / N + return states + + +def FRQI_decoding(states): + """ + Input : (batchsize, 2, N**2) ndarray + A batch of quantum states encoding grayscale images using the FRQI. + Returns : (batchsize, N, N) ndarray + A batch of square arrays representing the grayscale images. 
+
+
+######################################################################
+# 2. Low-depth image circuits
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# In general, the complexity of preparing the resulting state *exactly* scales exponentially with the
+# number of qubits. Known constructions (without auxiliary qubits) use :math:`\mathcal{O}(4^n)` gates
+# [`2 <#References>`__,\ `3 <#References>`__]. However, encoding typical images this way leads to
+# lowly entangled quantum states that are well approximated by tensor-network states such as
+# matrix-product states (MPSs) [`6 <#References>`__] whose bond dimension :math:`\chi` does not need
+# to scale with the image resolution. Thus, preparing the state *approximately* with a small error is
+# possible with a number of gates that scales only as :math:`\mathcal{O}(\chi^2n)`, i.e., linearly
+# with the number of qubits. While the cost of the classical preprocessing may be similar to the exact
+# state preparation, the resulting quantum circuits are exponentially more efficient.
+#
+# The following illustration shows the quantum circuits inspired by MPSs. The left side shows a
+# circuit with a staircase pattern with two layers (represented in turquoise and pink), where
+# two-qubit gates are applied sequentially, corresponding to a right-canonical MPS. The right side
+# shows the proposed circuit architecture corresponding to an MPS in mixed canonical form. By
+# effectively shifting the gates with the dashed outlines to the right, the gates are applied
+# sequentially outward starting from the center. This reduces the circuit depth while maintaining its
+# expressivity.
+
+# .. figure:: /_static/demonstration_assets/low_depth_circuits_mnist/circuit.png
+#    :align: center
+#    :width: 80 %
+#    :alt: Illustration of quantum circuits inspired by MPSs
+#
+#    Illustration of quantum circuits inspired by MPSs
+
+
+######################################################################
+# Downloading the quantum image dataset
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# The dataset configuration sets the name as ``'low-depth-mnist'`` and constructs the dataset path as
+# ``datasets/low-depth-mnist.h5``. For dataset loading, if the file exists locally, it is loaded
+# using ``qml.data.Dataset.open``. Otherwise, the dataset is downloaded from the PennyLane data
+# repository via ``qml.data.load``; note that the dataset size is approximately 1 GB.
+# +# ===================== ============================================================= +# Attribute Description +# ===================== ============================================================= +# ``exact_state`` The exact state that the corresponding circuit should prepare +# ``labels`` The correct labels classifying the corresponding images +# ``circuit_layout_d4`` The layout of the depth 4 circuit +# ``circuit_layout_d8`` The layout of the depth 8 circuit +# ``params_d4`` Parameters for the depth 4 circuit +# ``params_d8`` Parameters for the depth 8 circuit +# ``fidelities_d4`` Fidelities between the depth 4 state and the exact state +# ``fidelities_d8`` Fidelities between the depth 8 state and the exact state +# ===================== ============================================================= +# + +import os +import jax +import pennylane as qml +from tqdm import tqdm + +# JAX supports the single-precision numbers by default. The following line enables double-precision. +jax.config.update("jax_enable_x64", True) +# Set JAX to use CPU, simply set this to 'gpu' or 'tpu' to use those devices. +jax.config.update("jax_platform_name", "cpu") + +# Here you can choose the dataset and the encoding depth, depth 4 and depth 8 are available +DATASET_NAME = "low-depth-mnist" + +dataset_path = f"datasets/{DATASET_NAME}.h5" + +# Load the dataset if already downloaded +if os.path.exists(dataset_path): + dataset_params = qml.data.Dataset.open(dataset_path) +else: + # Download the dataset (~ 1 GB) + [dataset_params] = qml.data.load(DATASET_NAME) + +###################################################################### +# In the following cell, we define the ``get_circuit`` function that creates a quantum circuit based +# on the provided layout. The ``circuit_layout`` is an attribute of the dataset that specifies the +# sequence of quantum gates and their target qubits, which depends on the number of qubits and circuit +# depth. After defining the circuit function, we extract the relevant data for binary classification +# (digits 0 and 1 only) and compute the quantum states by executing the circuits with their +# corresponding parameters. These generated states will be used later for training the quantum +# classifier. +# + +TARGET_LABELS = [0, 1] + + +def get_circuit(circuit_layout): + """ + Create a quantum circuit with a given layout for preparing quantum states. + The circuit only contains RY rotation gates and CNOT gates, designed for efficient + state preparation with low circuit depth. + + :param circuit_layout: List of tuples containing gate types ('RY' or 'CNOT') and their target wires. + :return circuit: A JAX-compiled quantum circuit function that takes parameters and returns the quantum state. 
+    """
+    dev = qml.device("default.qubit", wires=11)
+
+    @jax.jit
+    @qml.qnode(dev)
+    def circuit(params):
+        counter = 0
+        for gate, wire in circuit_layout:
+
+            if gate == "RY":
+                qml.RY(params[counter], wire)
+                counter += 1
+
+            elif gate == "CNOT":
+                qml.CNOT(wire)
+
+        return qml.state()
+
+    return circuit
+
+
+# Unpack the dataset attributes, in this demo only digits 0 and 1 will be used
+labels = np.asarray(dataset_params.labels)
+selection = np.isin(labels, TARGET_LABELS)
+labels_01 = labels[selection]
+exact_state = np.asarray(dataset_params.exact_state)[selection]
+
+circuit_layout = dataset_params.circuit_layout_d4
+circuit = get_circuit(circuit_layout)
+params_01 = np.asarray(dataset_params.params_d4)[selection]
+states_01 = np.asarray(
+    [circuit(params) for params in tqdm(params_01, desc="States for depth 4")]
+)
+fidelities_01 = np.asarray(dataset_params.fidelities_d4)[selection]
+
+######################################################################
+# Reconstructing images from quantum states
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# To investigate how well the low-depth circuits reproduce the target images, we first **reconstruct**
+# the pictures encoded in each quantum state. The histogram below reports the *fidelity*
+# :math:`F = \left|\langle \psi_{\text{exact}} \mid \psi_{\text{circ.}} \rangle\right|^{2}`, i.e. the
+# overlap between the exact FRQI state :math:`|\psi_{\text{exact}}\rangle` and its 4-layer
+# center-sequential approximation :math:`|\psi_{\text{circ.}}\rangle`.
+#
+# - **Digit 1** samples (orange) cluster at a fidelity :math:`F` close to 1, indicating that four
+#   layers already capture these images almost perfectly.
+# - **Digit 0** samples (blue) display a broader, slightly lower-fidelity distribution, hinting at the
+#   greater entanglement required to reproduce their curved outline.
+#
+# On the right we decode the states back into pixel space. In line with the histogram, the
+# reconstructed “1” is virtually indistinguishable from its original, whereas the reconstructed “0”
+# shows minor blurring. By selecting a deeper circuit, the quality of the reconstructed images could
+# be improved, at the cost of some circuit efficiency.
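+#
+# As an additional consistency check (this snippet is not part of the original demo), the stored
+# fidelities can be compared with the squared overlaps computed directly from the state vectors
+# loaded above.
+
+flat_exact = exact_state.reshape(len(exact_state), -1)
+flat_circ = states_01.reshape(len(states_01), -1)
+overlaps = np.abs(np.sum(np.conj(flat_exact) * flat_circ, axis=1)) ** 2
+print("Largest deviation from stored fidelities:", np.max(np.abs(overlaps - fidelities_01)))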
+# + +import matplotlib.pyplot as plt + +# Select images with highest fidelity +idx_0 = np.argmax(fidelities_01[labels_01 == 0]) +idx_1 = np.argmax(fidelities_01[labels_01 == 1]) + +orig_0 = FRQI_decoding(exact_state[labels_01 == 0][idx_0][None, :])[0] +orig_1 = FRQI_decoding(exact_state[labels_01 == 1][idx_1][None, :])[0] + +rec_0 = FRQI_decoding(states_01[labels_01 == 0][idx_0][None, :])[0] +rec_1 = FRQI_decoding(states_01[labels_01 == 1][idx_1][None, :])[0] + +# Create a grid of figures to show both the fidelity distribution and the original and reconstructed images +fig = plt.figure(figsize=(9, 5)) +gs = fig.add_gridspec(2, 3, width_ratios=[1.2, 1, 1], wspace=0.05) + +# Histogram (spans both rows, leftmost column) +ax_hist = fig.add_subplot(gs[:, 0]) +ax_hist.hist(fidelities_01[labels_01 == 0], bins=20, alpha=0.5, label="Digit 0") +ax_hist.hist(fidelities_01[labels_01 == 1], bins=20, alpha=0.5, label="Digit 1") +ax_hist.set_xlabel("Fidelity") +ax_hist.set_ylabel("Count") +ax_hist.legend(loc="upper right") + +# Image axes (2 × 2 on the right) +ax00 = fig.add_subplot(gs[0, 1]) +ax01 = fig.add_subplot(gs[0, 2]) +ax10 = fig.add_subplot(gs[1, 1]) +ax11 = fig.add_subplot(gs[1, 2]) + +ax00.imshow(np.abs(orig_0), cmap="gray") +ax00.set_title("Original 0") +ax01.imshow(np.abs(orig_1), cmap="gray") +ax01.set_title("Original 1") +ax10.imshow(np.abs(rec_0), cmap="gray") +ax10.set_title("Reconstructed 0") +ax11.imshow(np.abs(rec_1), cmap="gray") +ax11.set_title("Reconstructed 1") + +# Remove all tick marks from image axes +for ax in [ax00, ax01, ax10, ax11]: + ax.set_xticks([]) + ax.set_yticks([]) + +###################################################################### +# 3. Quantum classifiers +# ---------------------- +# +# In this demo, we train a **variational quantum circuit** as classifier. Our datasets require +# ``N_QUBITS = 11``, therefore we use the same number of qubits for the classifier. Given a data state +# :math:`\rho(x)=\lvert\psi(x)\rangle\langle\psi(x)\rvert`, a generic **quantum classifier** evaluates +# :math:`f_{\ell}(x) = \operatorname{Tr}\bigl[ O_{\ell}(\theta)\,\rho(x) \bigr]`, with trainable +# circuit parameters :math:`\theta` that rotate a measurement operator :math:`O_\ell`. Variants +# explored in the paper [`1 <#References>`__] include +# +# - **Linear VQC** — sequential two‑qubit SU(4) layers (15 parameters per gate). +# - **Non‑linear VQC** — gate parameters depend on input data *x* via auxiliary retrieval circuits. +# - **Quantum‑kernel SVMs** — replacing inner products by quantum state overlaps. +# - **Tensor‑network (MPS/MPO) classifiers** for large qubit counts. +# +# In this demo we use a small linear VQC. The circuit consisits of two qubit gates correspdonding to +# the ``SO(4)`` gates +# +# .. figure:: /_static/demonstration_assets/low_depth_circuits_mnist/so4.png +# :align: center +# :width: 20 % +# :alt: Illustration of the SO(4) decomposition +# +# arranged in the sequential layout. 
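+#
+# To make the structure of this building block explicit, the following sketch (an addition to the
+# original demo, with arbitrary placeholder angles) spells out a single two-qubit block using the
+# same CNOT-RY-RY-CNOT-RY-RY pattern as the classifier circuit below.
+
+dev_block = qml.device("default.qubit", wires=2)
+
+
+@qml.qnode(dev_block)
+def so4_block(angles):
+    # one two-qubit building block acting on wires 0 and 1
+    qml.CNOT(wires=[0, 1])
+    qml.RY(angles[0], wires=0)
+    qml.RY(angles[1], wires=1)
+    qml.CNOT(wires=[0, 1])
+    qml.RY(angles[2], wires=0)
+    qml.RY(angles[3], wires=1)
+    return qml.state()
+
+
+print(qml.draw(so4_block)(np.zeros(4)))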
+# + +import optax + +# Define the hyperparameters +EPOCHS = 5 +BATCH = 128 +VAL_FRAC = 0.2 +N_QUBITS = 11 +DEPTH = 4 +N_CLASSES = 2 +SEED = 0 + +# Explicitly compute the number of model parameters +N_PARAMS_FIRST_LAYER = N_QUBITS +N_PARAMS_BLOCK = 4 +N_PARAMS_NETWORK = N_PARAMS_FIRST_LAYER + (N_QUBITS - 1) * DEPTH * N_PARAMS_BLOCK + +key = jax.random.PRNGKey(SEED) + +# Define the model and training functions +dev = qml.device("default.qubit", wires=N_QUBITS) + + +@jax.jit +@qml.qnode(dev, interface="jax") +def circuit(network_params, state): + p = iter(network_params) + qml.StatePrep(state, wires=range(N_QUBITS)) + + # First two layers of local RY rotations + for w in range(N_QUBITS): + qml.RY(next(p), wires=w) + + # SO(4) building blocks + for _ in range(DEPTH): + for j in range(N_QUBITS - 1): + qml.CNOT(wires=[j, j + 1]) + qml.RY(next(p), wires=j) + qml.RY(next(p), wires=j + 1) + qml.CNOT(wires=[j, j + 1]) + qml.RY(next(p), wires=j) + qml.RY(next(p), wires=j + 1) + + # Probability of computational basis states of the last qubit + # Can be extended to more qubits for multiclass case + return qml.probs(N_QUBITS - 1) + + +model = jax.vmap(circuit, in_axes=(None, 0)) + + +def loss_acc(params, batch_x, batch_y): + logits = model(params, batch_x) + loss = optax.softmax_cross_entropy_with_integer_labels(logits, batch_y).mean() + acc = (logits.argmax(-1) == batch_y).mean() + return loss, acc + + +# training step +@jax.jit +def train_step(params, opt_state, batch_x, batch_y): + (loss, acc), grads = jax.value_and_grad( + lambda p: loss_acc(p, batch_x, batch_y), has_aux=True + )(params) + updates, opt_state = opt.update(grads, opt_state, params) + return optax.apply_updates(params, updates), opt_state, loss, acc + + +# data loader +def loader(X, y, batch_size, rng_key): + idx = jax.random.permutation(rng_key, len(X)) + for i in range(0, len(X), batch_size): + sl = idx[i : i + batch_size] + yield X[sl], y[sl] + + +###################################################################### +# Preparing the training / validation split +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# We start by **casting** the FRQI amplitude vectors and their digit labels into JAX arrays. Next, the +# states and labels are shuffled from a pseudorandom key derived from the global ``SEED``. Then, the +# data is split into training and validation. Finally, we gather the tensors corresponding in the +# training ``(X_train, y_train)`` and validation sets ``(X_val, y_val)``. +# + +from jax import numpy as jnp + +# Prepare the data + +X_all = jnp.asarray( + states_01.real, dtype=jnp.float64 +) # we select the real part only, as the the imaginary part is zero since we only use RY and CNOT gates +y_all = jnp.asarray(labels_01, dtype=jnp.int32) + +key_split, key_perm = jax.random.split(jax.random.PRNGKey(SEED)) +perm = jax.random.permutation(key_perm, len(X_all)) +split_pt = int(len(X_all) * (1 - VAL_FRAC)) + +train_idx = perm[:split_pt] +val_idx = perm[split_pt:] + +X_train, y_train = X_all[train_idx], y_all[train_idx] +X_val, y_val = X_all[val_idx], y_all[val_idx] + +###################################################################### +# Training setup and loop +# ~~~~~~~~~~~~~~~~~~~~~~~ +# +# We begin by **initializing** the network weights ``params`` with values drawn uniformly from +# :math:`[0, 2\pi]` and initialize the **Adam optimizer** with a learning rate of +# :math:`1 \times 10^{-2}`. The **training loop** then iterates for ``EPOCHS`` and displays the +# progress via ``tqdm``: +# +# 1. 
For each mini-batch, ``train_step`` performs a forward pass, computes the cross-entropy loss and +# accuracy, back-propagates gradients, and updates ``params`` through the optimizer state +# ``opt_state``. +# 2. Using the *current* parameters, we evaluate the same metrics on the validation set **without** +# gradient updates. +# 3. Epoch-mean loss (``tl``, ``vl``) and accuracy (``ta``, ``va``) are appended to the tracking lists +# for later plotting. +# +# The first epoch will take longer than following epochs because of the just-in-time compilation. +# + +from tqdm.auto import trange + +# Define the training setup and start the training loop + +# optimizer +params = 2 * jnp.pi * jax.random.uniform(key, (N_PARAMS_NETWORK,), dtype=jnp.float64) +opt = optax.adam(1e-2) +opt_state = opt.init(params) + +# training loop +rng = key_split +train_loss_curve, val_loss_curve = [], [] +train_acc_curve, val_acc_curve = [], [] +# for epoch in range(1, EPOCHS + 1): +bar = trange(1, EPOCHS + 1, desc="Epochs", unit="ep") +for epoch in bar: + # train + rng, sub = jax.random.split(rng) + train_losses, train_accs = [], [] + for bx, by in loader(X_train, y_train, BATCH, sub): + params, opt_state, l, a = train_step(params, opt_state, bx, by) + train_losses.append(l) + train_accs.append(a) + + # validation + val_losses, val_accs = [], [] + for bx, by in loader(X_val, y_val, BATCH, rng): + l, a = loss_acc(params, bx, by) + val_losses.append(l) + val_accs.append(a) + + tl = jnp.mean(jnp.stack(train_losses)) + vl = jnp.mean(jnp.stack(val_losses)) + ta = jnp.mean(jnp.stack(train_accs)) + va = jnp.mean(jnp.stack(val_accs)) + + train_loss_curve.append(tl) + val_loss_curve.append(vl) + train_acc_curve.append(ta) + val_acc_curve.append(va) + bar.set_postfix( + train_loss=f"{tl:.4f}", + val_loss=f"{vl:.4f}", + train_acc=f"{ta:.4f}", + val_acc=f"{va:.4f}", + ) + +# Plot the training curves +( + fig, + ax, +) = plt.subplots(1, 2, figsize=(12.8, 4.8)) +ax[0].plot(train_loss_curve, label="Train") +ax[0].plot(val_loss_curve, label="Validation") +ax[0].set_xlabel("Epoch") +ax[0].set_ylabel("Loss") +ax[0].legend() +ax[1].plot(train_acc_curve) +ax[1].plot(val_acc_curve) +ax[1].set_xlabel("Epoch") +ax[1].set_ylabel("Accuracy") + +###################################################################### +# Conclusion +# ~~~~~~~~~~ +# +# | In this notebook we have demonstrated the use of low-depth quantum circuits to load and +# subsequently classify (a subset of) the MNIST dataset. +# | By filtering to specific target labels, constructing parametrized circuits from the provided +# layouts, and evaluating their states and fidelities, we have gained hands-on experience with +# quantum machine learning workflows on real data encodings. +# +# Explore the full set of `provided +# datasets `__—they contain a +# variety of different datasets at varying circuit depths, parameterizations, and target classes. You +# can adapt the presented workflow to different subsets and datasets, experiment with your own models, +# and contribute back insights on how these benchmark datasets can best support the development of +# practical quantum machine learning approaches. +# + +###################################################################### +# References +# ---------- +# +# [1] F.J. Kiwit, B. Jobst, A. Luckow, F. Pollmann and C.A. Riofrío. Typical Machine Learning Datasets +# as Low-Depth Quantum Circuits. *Quantum Sci. Technol.* in press (2025). DOI: +# https://doi.org/10.1088/2058-9565/ae0123. +# +# [2] P.Q. Le, F. Dong and K. Hirota. 
A flexible representation of quantum images for polynomial +# preparation, image compression, and processing operations. *Quantum Inf. Process* 10, 63–84 (2011). +# DOI: https://doi.org/10.1007/s11128-010-0177-y. +# +# [3] P.Q. Le, A.M. Iliyasu, F. Dong, and K. Hirota. A Flexible Representation and Invertible +# Transformations for Images on Quantum Computers. In: Ruano, A.E., Várkonyi-Kóczy, A.R. (eds) *New +# Advances in Intelligent Signal Processing. Studies in Computational Intelligence*, vol 372. +# Springer, Berlin, Heidelberg (2011). DOI: https://doi.org/10.1007/978-3-642-11739-8_9. +# +# [4] B. Sun *et al.* A Multi-Channel Representation for images on quantum computers using the RGBα +# color space, 2011 *IEEE 7th International Symposium on Intelligent Signal Processing*, Floriana, +# Malta, pp. 1-6 (2011). DOI: https://doi.org/10.1109/WISP.2011.6051718. +# +# [5] B. Sun, A. Iliyasu, F. Yan, F. Dong, and K. Hirota. An RGB Multi-Channel Representation for +# Images on Quantum Computers, *J. Adv. Comput. Intell. Intell. Inform.*, Vol. 17 No. 3, pp. 404–417 +# (2013). DOI: https://doi.org/10.20965/jaciii.2013.p0404. +# +# [6] B. Jobst, K. Shen, C.A. Riofrío, E. Shishenina and F. Pollmann. Efficient MPS representations +# and quantum circuits from the Fourier modes of classical image data. *Quantum* 8, 1544 (2024). DOI: +# https://doi.org/10.22331/q-2024-12-03-1544. +# + +###################################################################### +# Appendix +# -------- +# + +# The CIFAR-10 and Imagenette datasets use the following MCRQI color encoding and decoding [4,5] + + +def MCRQI_encoding(images): + """ + Input : (batchsize, N, N, 3) ndarray + A batch of arrays representing square RGB images. + Returns : (batchsize, 8, N**2) ndarray + A batch of quantum states encoding the RGB images using the MCRQI. + """ + # get image size and number of qubits + batchsize, N, _, channels = images.shape + n = 2 * int(np.log2(N)) + # reorder pixels hierarchically + states = np.reshape(images, (batchsize, *(2,) * n, channels)) + states = np.transpose( + states, + [0] + [ax + 1 for q in range(n // 2) for ax in (q, q + n // 2)] + [n + 1], + ) + # MCRQI encoding by stacking cos and sin components + states = np.stack( + [ + np.cos(np.pi / 2 * states[..., 0]), + np.cos(np.pi / 2 * states[..., 1]), + np.cos(np.pi / 2 * states[..., 2]), + np.ones(states.shape[:-1]), + np.sin(np.pi / 2 * states[..., 0]), + np.sin(np.pi / 2 * states[..., 1]), + np.sin(np.pi / 2 * states[..., 2]), + np.zeros(states.shape[:-1]), + ], + axis=1, + ) + # normalize and reshape + states = np.reshape(states, (batchsize, 8, N**2)) / (2 * N) + return states + + +def MCRQI_decoding(states): + """ + Input : (batchsize, 8, N**2) ndarray + A batch of quantum states encoding RGB images using the MCRQI. + Returns : (batchsize, N, N, 3) ndarray + A batch of arrays representing the square RGB images. 
+ """ + # get batchsize and number of qubits + batchsize = states.shape[0] + states = np.reshape(states, (batchsize, 8, -1)) + N2 = states.shape[2] + N = int(np.sqrt(N2)) + n = int(np.log2(N2)) + # invert MCRQI encoding to get pixel values + images = ( + np.arccos(states[:, :3] ** 2 * 4 * N2 - states[:, 4:7] ** 2 * 4 * N2) / np.pi + ) + # undo hierarchical ordering + images = np.reshape(images, (batchsize, 3, *(2,) * n)) + images = np.transpose(images, [0, *range(2, n + 1, 2), *range(3, n + 2, 2), 1]) + # reshape to square image + images = np.reshape(images, (batchsize, N, N, 3)) + return images diff --git a/demonstrations_v2/low_depth_circuits_mnist/metadata.json b/demonstrations_v2/low_depth_circuits_mnist/metadata.json new file mode 100644 index 0000000000..5bc8c1fc74 --- /dev/null +++ b/demonstrations_v2/low_depth_circuits_mnist/metadata.json @@ -0,0 +1,32 @@ +{ + "title": "Loading classical data with low-depth circuits", + "authors": [ + { + "username": "flokiwit" + }, + { + "username": "bernhard" + }, + { + "username": "criofrio" + } + ], + "executable_stable": true, + "executable_latest": true, + "dateOfPublication": "2025-09-21T00:00:00+00:00", + "dateOfLastModification": "2025-09-21T00:00:00+00:00", + "categories": [], + "tags": [], + "previewImages": [ + { + "type": "thumbnail", + "uri": "/_static/demo_thumbnails/regular_demo_thumbnails/thumbnail_placeholder.png" + } + ], + "seoDescription": "This demo shows how to efficiently encode classical image data into quantum states using low-depth circuits, and train a variational quantum classifier on a low-depth MNIST dataset.", + "doi": "", + "references": [], + "basedOnPapers": [], + "referencedByPapers": [], + "relatedContent": [] +} \ No newline at end of file diff --git a/demonstrations_v2/low_depth_circuits_mnist/requirements.in b/demonstrations_v2/low_depth_circuits_mnist/requirements.in new file mode 100644 index 0000000000..36cb65be8a --- /dev/null +++ b/demonstrations_v2/low_depth_circuits_mnist/requirements.in @@ -0,0 +1,11 @@ +pennylane==0.42.1 +autoray==0.6.12 +jax==0.4.33 +jaxlib==0.4.33 +optax==0.2.5 +matplotlib==3.10.5 +tqdm==4.67.1 +aiohttp==3.12.15 +h5py==3.14.0 +fsspec==2025.7.0 +ipykernel==6.30.0 \ No newline at end of file From 482b090a1e4bf9ac15652a911734ba0e69fcd089 Mon Sep 17 00:00:00 2001 From: fkiwit Date: Mon, 22 Sep 2025 09:39:47 +0200 Subject: [PATCH 02/21] Fixed the metadata and changed the number of characters to 100 using black --- .../low_depth_circuits_mnist/demo.py | 18 ++--- .../low_depth_circuits_mnist/metadata.json | 76 ++++++++++++++++++- 2 files changed, 79 insertions(+), 15 deletions(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index 7dbd82620f..fea5fb19eb 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -53,9 +53,7 @@ def FRQI_encoding(images): n = 2 * int(np.log2(N)) # reorder pixels hierarchically states = np.reshape(images, (batchsize, *(2,) * n)) - states = np.transpose( - states, [0] + [ax + 1 for q in range(n // 2) for ax in (q, q + n // 2)] - ) + states = np.transpose(states, [0] + [ax + 1 for q in range(n // 2) for ax in (q, q + n // 2)]) # FRQI encoding by stacking cos and sin components states = np.stack([np.cos(np.pi / 2 * states), np.sin(np.pi / 2 * states)], axis=1) # normalize and reshape @@ -210,9 +208,7 @@ def circuit(params): circuit_layout = dataset_params.circuit_layout_d4 circuit = get_circuit(circuit_layout) params_01 = 
np.asarray(dataset_params.params_d4)[selection] -states_01 = np.asarray( - [circuit(params) for params in tqdm(params_01, desc="States for depth 4")] -) +states_01 = np.asarray([circuit(params) for params in tqdm(params_01, desc="States for depth 4")]) fidelities_01 = np.asarray(dataset_params.fidelities_d4)[selection] ###################################################################### @@ -368,9 +364,9 @@ def loss_acc(params, batch_x, batch_y): # training step @jax.jit def train_step(params, opt_state, batch_x, batch_y): - (loss, acc), grads = jax.value_and_grad( - lambda p: loss_acc(p, batch_x, batch_y), has_aux=True - )(params) + (loss, acc), grads = jax.value_and_grad(lambda p: loss_acc(p, batch_x, batch_y), has_aux=True)( + params + ) updates, opt_state = opt.update(grads, opt_state, params) return optax.apply_updates(params, updates), opt_state, loss, acc @@ -599,9 +595,7 @@ def MCRQI_decoding(states): N = int(np.sqrt(N2)) n = int(np.log2(N2)) # invert MCRQI encoding to get pixel values - images = ( - np.arccos(states[:, :3] ** 2 * 4 * N2 - states[:, 4:7] ** 2 * 4 * N2) / np.pi - ) + images = np.arccos(states[:, :3] ** 2 * 4 * N2 - states[:, 4:7] ** 2 * 4 * N2) / np.pi # undo hierarchical ordering images = np.reshape(images, (batchsize, 3, *(2,) * n)) images = np.transpose(images, [0, *range(2, n + 1, 2), *range(3, n + 2, 2), 1]) diff --git a/demonstrations_v2/low_depth_circuits_mnist/metadata.json b/demonstrations_v2/low_depth_circuits_mnist/metadata.json index 5bc8c1fc74..5f2ea1fe6b 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/metadata.json +++ b/demonstrations_v2/low_depth_circuits_mnist/metadata.json @@ -15,7 +15,9 @@ "executable_latest": true, "dateOfPublication": "2025-09-21T00:00:00+00:00", "dateOfLastModification": "2025-09-21T00:00:00+00:00", - "categories": [], + "categories": [ + "Quantum Machine Learning" + ], "tags": [], "previewImages": [ { @@ -25,8 +27,76 @@ ], "seoDescription": "This demo shows how to efficiently encode classical image data into quantum states using low-depth circuits, and train a variational quantum classifier on a low-depth MNIST dataset.", "doi": "", - "references": [], - "basedOnPapers": [], + "references": [ + { + "id": "kiwit2025", + "type": "article", + "title": "Typical Machine Learning Datasets as Low-Depth Quantum Circuits", + "authors": "Kiwit, F.J., Jobst, B., Luckow, A., Pollmann, F., Riofrío, C.A.", + "year": "2025", + "journal": "Quantum Sci. Technol.", + "doi": "10.1088/2058-9565/ae0123", + "url": "https://doi.org/10.1088/2058-9565/ae0123" + }, + { + "id": "le2011a", + "type": "article", + "title": "A flexible representation of quantum images for polynomial preparation, image compression, and processing operations", + "authors": "Le, P.Q., Dong, F., Hirota, K.", + "year": "2011", + "journal": "Quantum Inf. Process", + "doi": "10.1007/s11128-010-0177-y", + "url": "https://doi.org/10.1007/s11128-010-0177-y" + }, + { + "id": "le2011b", + "type": "incollection", + "title": "A Flexible Representation and Invertible Transformations for Images on Quantum Computers", + "authors": "Le, P.Q., Iliyasu, A.M., Dong, F., Hirota, K.", + "year": "2011", + "booktitle": "New Advances in Intelligent Signal Processing. 
Studies in Computational Intelligence, vol 372", + "publisher": "Springer, Berlin, Heidelberg", + "doi": "10.1007/978-3-642-11739-8_9", + "url": "https://doi.org/10.1007/978-3-642-11739-8_9" + }, + { + "id": "sun2011", + "type": "inproceedings", + "title": "A Multi-Channel Representation for Images on Quantum Computers Using the RGBα Color Space", + "authors": "Sun, B. et al.", + "year": "2011", + "booktitle": "2011 IEEE 7th International Symposium on Intelligent Signal Processing, Floriana, Malta", + "pages": "1–6", + "doi": "10.1109/WISP.2011.6051718", + "url": "https://doi.org/10.1109/WISP.2011.6051718" + }, + { + "id": "sun2013", + "type": "article", + "title": "An RGB Multi-Channel Representation for Images on Quantum Computers", + "authors": "Sun, B., Iliyasu, A., Yan, F., Dong, F., Hirota, K.", + "year": "2013", + "journal": "J. Adv. Comput. Intell. Intell. Inform.", + "volume": "17", + "number": "3", + "pages": "404–417", + "doi": "10.20965/jaciii.2013.p0404", + "url": "https://doi.org/10.20965/jaciii.2013.p0404" + }, + { + "id": "jobst2024", + "type": "article", + "title": "Efficient MPS representations and quantum circuits from the Fourier modes of classical image data", + "authors": "Jobst, B., Shen, K., Riofrío, C.A., Shishenina, E., Pollmann, F.", + "year": "2024", + "journal": "Quantum", + "volume": "8", + "pages": "1544", + "doi": "10.22331/q-2024-12-03-1544", + "url": "https://doi.org/10.22331/q-2024-12-03-1544" + } + ], + "basedOnPapers": ["10.1088/2058-9565/ae0123"], "referencedByPapers": [], "relatedContent": [] } \ No newline at end of file From cdba56eedd51b1157bfce2e3b5b826f59bd37a30 Mon Sep 17 00:00:00 2001 From: fkiwit Date: Mon, 22 Sep 2025 10:13:44 +0200 Subject: [PATCH 03/21] Remove redundant packages from demonstrations_v2/low_depth_circuits_mnist/requirements.in and update the reference types in demonstrations_v2/low_depth_circuits_mnist/metadata.json --- .../low_depth_circuits_mnist/metadata.json | 4 ++-- .../low_depth_circuits_mnist/requirements.in | 11 +---------- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/metadata.json b/demonstrations_v2/low_depth_circuits_mnist/metadata.json index 5f2ea1fe6b..39c520f298 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/metadata.json +++ b/demonstrations_v2/low_depth_circuits_mnist/metadata.json @@ -50,7 +50,7 @@ }, { "id": "le2011b", - "type": "incollection", + "type": "other", "title": "A Flexible Representation and Invertible Transformations for Images on Quantum Computers", "authors": "Le, P.Q., Iliyasu, A.M., Dong, F., Hirota, K.", "year": "2011", @@ -61,7 +61,7 @@ }, { "id": "sun2011", - "type": "inproceedings", + "type": "other", "title": "A Multi-Channel Representation for Images on Quantum Computers Using the RGBα Color Space", "authors": "Sun, B. 
et al.", "year": "2011", diff --git a/demonstrations_v2/low_depth_circuits_mnist/requirements.in b/demonstrations_v2/low_depth_circuits_mnist/requirements.in index 36cb65be8a..22e889b763 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/requirements.in +++ b/demonstrations_v2/low_depth_circuits_mnist/requirements.in @@ -1,11 +1,2 @@ -pennylane==0.42.1 autoray==0.6.12 -jax==0.4.33 -jaxlib==0.4.33 -optax==0.2.5 -matplotlib==3.10.5 -tqdm==4.67.1 -aiohttp==3.12.15 -h5py==3.14.0 -fsspec==2025.7.0 -ipykernel==6.30.0 \ No newline at end of file +tqdm==4.67.1 \ No newline at end of file From 3c7710f9af28d4bd1c25e573aa3d6b637a057181 Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:38:25 +0200 Subject: [PATCH 04/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index fea5fb19eb..166f8cbb9e 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -103,7 +103,7 @@ def FRQI_decoding(states): # effectively shifting the gates with the dashed outlines to the right, the gates are applied # sequentially outward starting from the center. This reduces the circuit depth while maintaining its # expressivity. - +# # .. figure:: /_static/demonstration_assets/low_depth_circuits_mnist/circuit.png # :align: center # :width: 80 % From 0c0c993674208ccaa3bb59f7a6a2bba93af21760 Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:39:13 +0200 Subject: [PATCH 05/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index 166f8cbb9e..117826c656 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -281,7 +281,7 @@ def circuit(params): # 3. Quantum classifiers # ---------------------- # -# In this demo, we train a **variational quantum circuit** as classifier. Our datasets require +# In this demo, we train a **variational quantum circuit** as a classifier. Our datasets require # ``N_QUBITS = 11``, therefore we use the same number of qubits for the classifier. 
Given a data state # :math:`\rho(x)=\lvert\psi(x)\rangle\langle\psi(x)\rvert`, a generic **quantum classifier** evaluates # :math:`f_{\ell}(x) = \operatorname{Tr}\bigl[ O_{\ell}(\theta)\,\rho(x) \bigr]`, with trainable From 30e62debe41c4403a9655b81d1743c946787948d Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:39:41 +0200 Subject: [PATCH 06/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index 117826c656..a44f9a84eb 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -293,7 +293,7 @@ def circuit(params): # - **Quantum‑kernel SVMs** — replacing inner products by quantum state overlaps. # - **Tensor‑network (MPS/MPO) classifiers** for large qubit counts. # -# In this demo we use a small linear VQC. The circuit consisits of two qubit gates correspdonding to +# In this demo we use a small linear VQC. The circuit consists of two qubit gates corresponding to # the ``SO(4)`` gates # # .. figure:: /_static/demonstration_assets/low_depth_circuits_mnist/so4.png From fd7f7696e092969da69ae8f4a44c637fd4828e32 Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:39:52 +0200 Subject: [PATCH 07/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index a44f9a84eb..148cf15237 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -1,7 +1,7 @@ r"""Loading classical data with low-depth circuits ============================================== -The encoding of arbitrary classical data into quantum states usually comes at a high computational +Encoding arbitrary classical data into quantum states usually comes at a high computational cost, either in terms of qubits or gate count. However, real-world data typically exhibits some inherent structure (such as image data) which can be leveraged to load them with a much smaller cost on a quantum computer. The paper **“Typical Machine Learning Datasets as Low‑Depth Quantum From c92aa4f54434ed223d2323680b982dc2db065563 Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:40:02 +0200 Subject: [PATCH 08/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index 148cf15237..9e7d7f3c30 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -385,7 +385,7 @@ def loader(X, y, batch_size, rng_key): # # We start by **casting** the FRQI amplitude vectors and their digit labels into JAX arrays. 
Next, the # states and labels are shuffled from a pseudorandom key derived from the global ``SEED``. Then, the -# data is split into training and validation. Finally, we gather the tensors corresponding in the +# data is split into training and validation. Finally, we gather the tensors corresponding to the # training ``(X_train, y_train)`` and validation sets ``(X_val, y_val)``. # From b874d329fb1b1228bf370f4f4b1240a6152614c5 Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:40:20 +0200 Subject: [PATCH 09/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index 9e7d7f3c30..bb69f7ff71 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -6,7 +6,7 @@ inherent structure (such as image data) which can be leveraged to load them with a much smaller cost on a quantum computer. The paper **“Typical Machine Learning Datasets as Low‑Depth Quantum Circuits”** (2025) [`1 <#References>`__] develops an efficient algorithm for finding low-depth -quantum circuits to load classical image data as quantum states. +quantum circuits to load classical image data into quantum states. This demo gives an introduction to the paper **“Typical Machine Learning Datasets as Low‑Depth Quantum Circuits”** (2025). We will discuss the following three steps: 1) Quantum image From 36a3841bd2d1dcec25523303b0cbb9fc23eb649a Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:48:33 +0200 Subject: [PATCH 10/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- .../low_depth_circuits_mnist/demo.py | 60 +++++++++++-------- 1 file changed, 36 insertions(+), 24 deletions(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index bb69f7ff71..b7906a81de 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -512,30 +512,42 @@ def loader(X, y, batch_size, rng_key): # References # ---------- # -# [1] F.J. Kiwit, B. Jobst, A. Luckow, F. Pollmann and C.A. Riofrío. Typical Machine Learning Datasets -# as Low-Depth Quantum Circuits. *Quantum Sci. Technol.* in press (2025). DOI: -# https://doi.org/10.1088/2058-9565/ae0123. -# -# [2] P.Q. Le, F. Dong and K. Hirota. A flexible representation of quantum images for polynomial -# preparation, image compression, and processing operations. *Quantum Inf. Process* 10, 63–84 (2011). -# DOI: https://doi.org/10.1007/s11128-010-0177-y. -# -# [3] P.Q. Le, A.M. Iliyasu, F. Dong, and K. Hirota. A Flexible Representation and Invertible -# Transformations for Images on Quantum Computers. In: Ruano, A.E., Várkonyi-Kóczy, A.R. (eds) *New -# Advances in Intelligent Signal Processing. Studies in Computational Intelligence*, vol 372. -# Springer, Berlin, Heidelberg (2011). DOI: https://doi.org/10.1007/978-3-642-11739-8_9. -# -# [4] B. 
Sun *et al.* A Multi-Channel Representation for images on quantum computers using the RGBα -# color space, 2011 *IEEE 7th International Symposium on Intelligent Signal Processing*, Floriana, -# Malta, pp. 1-6 (2011). DOI: https://doi.org/10.1109/WISP.2011.6051718. -# -# [5] B. Sun, A. Iliyasu, F. Yan, F. Dong, and K. Hirota. An RGB Multi-Channel Representation for -# Images on Quantum Computers, *J. Adv. Comput. Intell. Intell. Inform.*, Vol. 17 No. 3, pp. 404–417 -# (2013). DOI: https://doi.org/10.20965/jaciii.2013.p0404. -# -# [6] B. Jobst, K. Shen, C.A. Riofrío, E. Shishenina and F. Pollmann. Efficient MPS representations -# and quantum circuits from the Fourier modes of classical image data. *Quantum* 8, 1544 (2024). DOI: -# https://doi.org/10.22331/q-2024-12-03-1544. +# .. [#Kiwit] +# +# F.J. Kiwit, B. Jobst, A. Luckow, F. Pollmann and C.A. Riofrío. Typical Machine Learning Datasets +# as Low-Depth Quantum Circuits. *Quantum Sci. Technol.* in press (2025). DOI: +# https://doi.org/10.1088/2058-9565/ae0123. +# +# .. [#LeFlexible] +# +# P.Q. Le, F. Dong and K. Hirota. A flexible representation of quantum images for polynomial +# preparation, image compression, and processing operations. *Quantum Inf. Process* 10, 63–84 (2011). +# DOI: https://doi.org/10.1007/s11128-010-0177-y. +# +# .. [#LeAdvances] +# +# P.Q. Le, A.M. Iliyasu, F. Dong, and K. Hirota. A Flexible Representation and Invertible +# Transformations for Images on Quantum Computers. In: Ruano, A.E., Várkonyi-Kóczy, A.R. (eds) *New +# Advances in Intelligent Signal Processing. Studies in Computational Intelligence*, vol 372. +# Springer, Berlin, Heidelberg (2011). DOI: https://doi.org/10.1007/978-3-642-11739-8_9. +# +# .. [#SunMulti] +# +# B. Sun *et al.* A Multi-Channel Representation for images on quantum computers using the RGBα +# color space, 2011 *IEEE 7th International Symposium on Intelligent Signal Processing*, Floriana, +# Malta, pp. 1-6 (2011). DOI: https://doi.org/10.1109/WISP.2011.6051718. +# +# .. [#SunRGB] +# +# B. Sun, A. Iliyasu, F. Yan, F. Dong, and K. Hirota. An RGB Multi-Channel Representation for +# Images on Quantum Computers, *J. Adv. Comput. Intell. Intell. Inform.*, Vol. 17 No. 3, pp. 404–417 +# (2013). DOI: https://doi.org/10.20965/jaciii.2013.p0404. +# +# .. [#Jobst] +# +# B. Jobst, K. Shen, C.A. Riofrío, E. Shishenina and F. Pollmann. Efficient MPS representations +# and quantum circuits from the Fourier modes of classical image data. *Quantum* 8, 1544 (2024). DOI: +# https://doi.org/10.22331/q-2024-12-03-1544. 
# ###################################################################### From b33525f9bf5c7ab9df956a47ac0b8f7ac9addf22 Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:50:14 +0200 Subject: [PATCH 11/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index b7906a81de..7ba4457d3a 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -286,7 +286,7 @@ def circuit(params): # :math:`\rho(x)=\lvert\psi(x)\rangle\langle\psi(x)\rvert`, a generic **quantum classifier** evaluates # :math:`f_{\ell}(x) = \operatorname{Tr}\bigl[ O_{\ell}(\theta)\,\rho(x) \bigr]`, with trainable # circuit parameters :math:`\theta` that rotate a measurement operator :math:`O_\ell`. Variants -# explored in the paper [`1 <#References>`__] include +# explored in the paper [#Kiwit]_ include # # - **Linear VQC** — sequential two‑qubit SU(4) layers (15 parameters per gate). # - **Non‑linear VQC** — gate parameters depend on input data *x* via auxiliary retrieval circuits. From 9684f3054f10b24f9d610e0eb1b14f52ccb66fa4 Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:50:38 +0200 Subject: [PATCH 12/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index 7ba4457d3a..e157f0e655 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -5,7 +5,7 @@ cost, either in terms of qubits or gate count. However, real-world data typically exhibits some inherent structure (such as image data) which can be leveraged to load them with a much smaller cost on a quantum computer. The paper **“Typical Machine Learning Datasets as Low‑Depth Quantum -Circuits”** (2025) [`1 <#References>`__] develops an efficient algorithm for finding low-depth +Circuits”** (2025) [#Kiwit]_ develops an efficient algorithm for finding low-depth quantum circuits to load classical image data into quantum states. 
This demo gives an introduction to the paper **“Typical Machine Learning Datasets as From dad72d98c2f4228fcabbea72d2164382dbfbc81b Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:51:04 +0200 Subject: [PATCH 13/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index e157f0e655..c18119da8c 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -23,7 +23,7 @@ # where the **address register** :math:`\left| j\right>` holds the pixel position (:math:`n` qubits), # and additional **color qubits** :math:`\left| c(x_j)\right>` encode the pixel intensities. For # grayscale images, we use the *flexible representation of quantum images (FRQI)* -# [`2 <#References>`__,\ `3 <#References>`__] as an encoding. In this case, the data value +# [#LeFlexible]_ [#LeAdvances]_ as an encoding. In this case, the data value # :math:`{x}_j` of each pixel is just a single number corresponding to the grayscale value of that # pixel. We can encode this information in the :math:`z`-polarization of an additional color qubit as # :math:`\left|c({x}_j)\right> = \cos({\textstyle\frac{\pi}{2}} {x}_j) \left| 0 \right> + \sin({\textstyle\frac{\pi}{2}} {x}_j) \left| 1 \right>`, From bbabcd6b78108b00996fb6d445260c21bfbd0af9 Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:51:25 +0200 Subject: [PATCH 14/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index c18119da8c..5e5580bf1b 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -33,7 +33,7 @@ # For color images, the *multi-channel representation of quantum images (MCRQI)* # [`4 <#References>`__,\ `5 <#References>`__] can be used. Python implementations of the MCRQI # encoding and decoding are provided at the end of this demo and are discussed in Ref. -# [`1 <#References>`__]. +# [#Kiwit]_. # from pennylane import numpy as np From 25db1b774da9c6a374715e85ee1146fba3c9949c Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:51:43 +0200 Subject: [PATCH 15/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index 5e5580bf1b..1971b713ae 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -88,7 +88,7 @@ def FRQI_decoding(states): # # In general, the complexity of preparing the resulting state *exactly* scales exponentially with the # number of qubits. 
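As a concrete companion to the FRQI definition in the hunk above, the following NumPy sketch builds the exact FRQI state of a toy 2x2 image and decodes it again; the toy image and the address-major qubit ordering are assumptions, and the demo's own ``FRQI_decoding`` helper may use a different convention::

    import numpy as np

    image = np.array([[0.0, 0.25],
                      [0.5, 1.0]])                 # 2x2 grayscale image, n = 2 address qubits
    x = image.flatten()
    N = len(x)                                     # number of pixels = 2**n

    # amplitudes cos(pi/2 x_j) on |j>|0> and sin(pi/2 x_j) on |j>|1>
    color = np.stack([np.cos(np.pi / 2 * x), np.sin(np.pi / 2 * x)], axis=1)
    psi = (color / np.sqrt(N)).reshape(-1)         # address qubits first, color qubit last

    print(np.isclose(np.linalg.norm(psi), 1.0))    # FRQI states are normalized

    # decoding recovers each pixel value from the two amplitudes of that pixel
    amps = psi.reshape(N, 2) * np.sqrt(N)
    recovered = 2 / np.pi * np.arctan2(amps[:, 1], amps[:, 0])
    print(np.allclose(recovered, x))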
Known constructions (without auxiliary qubits) use :math:`\mathcal{O}(4^n)` gates -# [`2 <#References>`__,\ `3 <#References>`__]. However, encoding typical images this way leads to +# [#LeFlexible]_ [#LeAdvances]_. However, encoding typical images this way leads to # lowly entangled quantum states that are well approximated by tensor-network states such as # matrix-product states (MPSs) [`6 <#References>`__] whose bond dimension :math:`\chi` does not need # to scale with the image resolution. Thus, preparing the state *approximately* with a small error is From 7e4ce55f894017d32323f48c03029339af7a2173 Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:52:01 +0200 Subject: [PATCH 16/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index 1971b713ae..500b0dbe1e 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -31,7 +31,7 @@ # pixels is encoded into a quantum state with :math:`n+1` qubits. # # For color images, the *multi-channel representation of quantum images (MCRQI)* -# [`4 <#References>`__,\ `5 <#References>`__] can be used. Python implementations of the MCRQI +# [#SunMulti]_ [#SunRGB]_ can be used. Python implementations of the MCRQI # encoding and decoding are provided at the end of this demo and are discussed in Ref. # [#Kiwit]_. # From 0573e42d4eceb991223ba05fe3aad76871edf5d9 Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:52:18 +0200 Subject: [PATCH 17/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index 500b0dbe1e..a7946edccc 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -90,7 +90,7 @@ def FRQI_decoding(states): # number of qubits. Known constructions (without auxiliary qubits) use :math:`\mathcal{O}(4^n)` gates # [#LeFlexible]_ [#LeAdvances]_. However, encoding typical images this way leads to # lowly entangled quantum states that are well approximated by tensor-network states such as -# matrix-product states (MPSs) [`6 <#References>`__] whose bond dimension :math:`\chi` does not need +# matrix-product states (MPSs) [#Jobst]_ whose bond dimension :math:`\chi` does not need # to scale with the image resolution. Thus, preparing the state *approximately* with a small error is # possible with a number of gates that scales only as :math:`\mathcal{O}(\chi^2n)`, i.e., linearly # with the number of qubits. 
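To make the bond-dimension argument concrete, the following NumPy sketch splits a 5-qubit state vector into a matrix-product state by successive SVDs and truncates every bond to :math:`\chi`; it is a generic illustration on a random state, not the compression algorithm used in the paper::

    import numpy as np

    def state_to_mps(psi, n_qubits, chi):
        """Split a 2**n state vector into an MPS, truncating every bond to chi."""
        tensors, bond = [], 1
        rest = psi.reshape(1, -1)
        for _ in range(n_qubits - 1):
            rest = rest.reshape(bond * 2, -1)
            u, s, vh = np.linalg.svd(rest, full_matrices=False)
            k = min(chi, len(s))                    # keep only the k largest singular values
            tensors.append(u[:, :k].reshape(bond, 2, k))
            rest = s[:k, None] * vh[:k]
            bond = k
        tensors.append(rest.reshape(bond, 2, 1))
        return tensors

    psi = np.random.rand(2**5)
    psi /= np.linalg.norm(psi)
    mps = state_to_mps(psi, n_qubits=5, chi=2)
    print([t.shape for t in mps])                   # every bond dimension is at most chi = 2

For lowly entangled image states the discarded singular values are small, which is what makes a circuit construction with only :math:`\mathcal{O}(\chi^2 n)` gates accurate.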
While the cost of the classical preprocessing may be similar to the exact From 9197f4f5a389369c5f34800522b8076551f52176 Mon Sep 17 00:00:00 2001 From: fkiwit Date: Mon, 6 Oct 2025 23:21:27 +0200 Subject: [PATCH 18/21] Remove tqdm, remove table, add link to pennylane dataset, a bit more details one the three steps --- .../low_depth_circuits_mnist/demo.py | 56 ++++++++----------- 1 file changed, 24 insertions(+), 32 deletions(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index a7946edccc..041b2f5cab 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -9,9 +9,11 @@ quantum circuits to load classical image data into quantum states. This demo gives an introduction to the paper **“Typical Machine Learning Datasets as -Low‑Depth Quantum Circuits”** (2025). We will discuss the following three steps: 1) Quantum image -states, 2) Low-depth image circuits, 3) Training a small variational‑quantum‑circuit (VQC) -classifier on the dataset. +Low‑Depth Quantum Circuits”** (2025). We will discuss the following three steps: + +1. **Define** how classical images can be encoded as quantum states. +2. **Construct** low-depth quantum circuits that efficiently generate these image states. +3. **Train and evaluate** a small variational quantum circuit (VQC) classifier on the dataset. """ ###################################################################### @@ -121,24 +123,10 @@ def FRQI_decoding(states): # is loaded using ``qml.data.Dataset.open``. Otherwise, the dataset is downloaded from the PennyLane # data repository via ``qml.data.load``, note that the dataset size is approximately 1 GB. # -# ===================== ============================================================= -# Attribute Description -# ===================== ============================================================= -# ``exact_state`` The exact state that the corresponding circuit should prepare -# ``labels`` The correct labels classifying the corresponding images -# ``circuit_layout_d4`` The layout of the depth 4 circuit -# ``circuit_layout_d8`` The layout of the depth 8 circuit -# ``params_d4`` Parameters for the depth 4 circuit -# ``params_d8`` Parameters for the depth 8 circuit -# ``fidelities_d4`` Fidelities between the depth 4 state and the exact state -# ``fidelities_d8`` Fidelities between the depth 8 state and the exact state -# ===================== ============================================================= -# import os import jax import pennylane as qml -from tqdm import tqdm # JAX supports the single-precision numbers by default. The following line enables double-precision. jax.config.update("jax_enable_x64", True) @@ -164,7 +152,8 @@ def FRQI_decoding(states): # depth. After defining the circuit function, we extract the relevant data for binary classification # (digits 0 and 1 only) and compute the quantum states by executing the circuits with their # corresponding parameters. These generated states will be used later for training the quantum -# classifier. +# classifier. You can find more information and download the datasets at +# `PennyLane Datasets: Low-Depth Image Circuits `_. 
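The code below executes the stored circuits and reuses the dataset's precomputed fidelities; as a quick cross-check, the fidelity of any pair of state vectors can also be recomputed directly. A hedged sketch with random placeholder vectors (the dataset's ``exact_state`` entries and the circuit outputs would take their place)::

    import numpy as np

    def fidelity(exact_state, circuit_state):
        """F = |<psi_exact|psi_circ>|**2 for normalized state vectors."""
        return np.abs(np.vdot(exact_state, circuit_state)) ** 2

    # toy self-check with random placeholders for the exact and circuit states
    a = np.random.rand(8) + 1j * np.random.rand(8)
    a /= np.linalg.norm(a)
    b = np.random.rand(8) + 1j * np.random.rand(8)
    b /= np.linalg.norm(b)
    print(fidelity(a, a), fidelity(a, b))           # 1.0 for identical states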
# TARGET_LABELS = [0, 1] @@ -208,7 +197,18 @@ def circuit(params): circuit_layout = dataset_params.circuit_layout_d4 circuit = get_circuit(circuit_layout) params_01 = np.asarray(dataset_params.params_d4)[selection] -states_01 = np.asarray([circuit(params) for params in tqdm(params_01, desc="States for depth 4")]) + +states_01 = [] +n = len(params_01) + +for i, params in enumerate(params_01): + states_01.append(circuit(params)) + # Print every 10% + if (i + 1) % (n // 10) == 0: + print(f"{(i + 1) / n * 100:.0f}% of the states computed") + +states_01 = np.asarray(states_01) + fidelities_01 = np.asarray(dataset_params.fidelities_d4)[selection] ###################################################################### @@ -414,8 +414,7 @@ def loader(X, y, batch_size, rng_key): # # We begin by **initializing** the network weights ``params`` with values drawn uniformly from # :math:`[0, 2\pi]` and initialize the **Adam optimizer** with a learning rate of -# :math:`1 \times 10^{-2}`. The **training loop** then iterates for ``EPOCHS`` and displays the -# progress via ``tqdm``: +# :math:`1 \times 10^{-2}`. The **training loop** then iterates for ``EPOCHS``: # # 1. For each mini-batch, ``train_step`` performs a forward pass, computes the cross-entropy loss and # accuracy, back-propagates gradients, and updates ``params`` through the optimizer state @@ -428,8 +427,6 @@ def loader(X, y, batch_size, rng_key): # The first epoch will take longer than following epochs because of the just-in-time compilation. # -from tqdm.auto import trange - # Define the training setup and start the training loop # optimizer @@ -441,9 +438,7 @@ def loader(X, y, batch_size, rng_key): rng = key_split train_loss_curve, val_loss_curve = [], [] train_acc_curve, val_acc_curve = [], [] -# for epoch in range(1, EPOCHS + 1): -bar = trange(1, EPOCHS + 1, desc="Epochs", unit="ep") -for epoch in bar: +for epoch in range(1, EPOCHS + 1): # train rng, sub = jax.random.split(rng) train_losses, train_accs = [], [] @@ -468,12 +463,9 @@ def loader(X, y, batch_size, rng_key): val_loss_curve.append(vl) train_acc_curve.append(ta) val_acc_curve.append(va) - bar.set_postfix( - train_loss=f"{tl:.4f}", - val_loss=f"{vl:.4f}", - train_acc=f"{ta:.4f}", - val_acc=f"{va:.4f}", - ) + print(f"Epoch {epoch:03d}/{EPOCHS} | " + f"train_loss={tl:.4f}, val_loss={vl:.4f}, " + f"train_acc={ta:.4f}, val_acc={va:.4f}") # Plot the training curves ( From 9021b07876c2d842b25b9efa219e10ff2535bb8f Mon Sep 17 00:00:00 2001 From: fkiwit <74311424+fkiwit@users.noreply.github.com> Date: Sun, 2 Nov 2025 19:57:32 +0100 Subject: [PATCH 19/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py Co-authored-by: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> --- demonstrations_v2/low_depth_circuits_mnist/demo.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py index 041b2f5cab..affb61aefe 100644 --- a/demonstrations_v2/low_depth_circuits_mnist/demo.py +++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py @@ -499,8 +499,7 @@ def loader(X, y, batch_size, rng_key): # and contribute back insights on how these benchmark datasets can best support the development of # practical quantum machine learning approaches. 
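The hunks above repeatedly show the signature ``loader(X, y, batch_size, rng_key)`` without its body; here is a hedged sketch of what such a JAX mini-batch loader typically looks like (the demo's actual implementation may differ)::

    import jax
    import jax.numpy as jnp

    def loader(X, y, batch_size, rng_key):
        """Yield shuffled (X, y) mini-batches for one epoch."""
        n = X.shape[0]
        perm = jax.random.permutation(rng_key, n)
        for start in range(0, n, batch_size):
            idx = perm[start:start + batch_size]
            yield X[idx], y[idx]

    # usage with placeholder data
    for xb, yb in loader(jnp.ones((100, 4)), jnp.zeros(100), 32, jax.random.PRNGKey(0)):
        print(xb.shape, yb.shape)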
 #
-
-######################################################################
+#
 # References
 # ----------
 #

From e19a2726a062fd1dca5a72cf973b33ccd047ac0 Mon Sep 17 00:00:00 2001
From: fkiwit <74311424+fkiwit@users.noreply.github.com>
Date: Sun, 2 Nov 2025 19:58:08 +0100
Subject: [PATCH 20/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py

Co-authored-by: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com>
---
 demonstrations_v2/low_depth_circuits_mnist/demo.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py
index affb61aefe..879bd83b2f 100644
--- a/demonstrations_v2/low_depth_circuits_mnist/demo.py
+++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py
@@ -486,11 +486,11 @@ def loader(X, y, batch_size, rng_key):
 # Conclusion
 # ~~~~~~~~~~
 #
-# | In this notebook we have demonstrated the use of low-depth quantum circuits to load and
-# subsequently classify (a subset of) the MNIST dataset.
-# | By filtering to specific target labels, constructing parametrized circuits from the provided
-# layouts, and evaluating their states and fidelities, we have gained hands-on experience with
-# quantum machine learning workflows on real data encodings.
+# In this notebook we have demonstrated the use of low-depth quantum circuits to load and
+# subsequently classify (a subset of) the MNIST dataset.
+# By filtering to specific target labels, constructing parametrized circuits from the provided
+# layouts, and evaluating their states and fidelities, we have gained hands-on experience with
+# quantum machine learning workflows on real data encodings.
 #
 # Explore the full set of `provided
 # datasets `__—they contain a

From 14135cf4ea3237890fccedfebcbc52470685cb5e Mon Sep 17 00:00:00 2001
From: fkiwit <74311424+fkiwit@users.noreply.github.com>
Date: Sun, 2 Nov 2025 19:58:42 +0100
Subject: [PATCH 21/21] Update demonstrations_v2/low_depth_circuits_mnist/demo.py

Co-authored-by: Diego <67476785+DSGuala@users.noreply.github.com>
---
 demonstrations_v2/low_depth_circuits_mnist/demo.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/demonstrations_v2/low_depth_circuits_mnist/demo.py b/demonstrations_v2/low_depth_circuits_mnist/demo.py
index 879bd83b2f..06235cb352 100644
--- a/demonstrations_v2/low_depth_circuits_mnist/demo.py
+++ b/demonstrations_v2/low_depth_circuits_mnist/demo.py
@@ -218,8 +218,7 @@ def circuit(params):
 # To investigate how well the low-depth circuits reproduce the target images, we first **reconstruct**
 # the pictures encoded in each quantum state. The histogram below reports the *fidelity*
 # :math:`F = \left|\langle \psi_{\text{exact}} \mid \psi_{\text{circ.}} \rangle\right|^{2}`, i.e. the
-# overlap between the exact FRQI state $
-# \|:raw-latex:`\psi`\_{:raw-latex:`\text{exact}`}:raw-latex:`\rangle `$ and its 4-layer
+# overlap between the exact FRQI state :math:`|\psi_{\text{exact}}\rangle` and its 4-layer
 # center-sequential approximation :math:`|\psi_{\text{circ.}}\rangle`.
 #
 # - **Digit 1** samples (orange) cluster at a fidelity :math:`F` close to 1, indicating that four