diff --git a/Manifest.toml b/Manifest.toml index da79e19..4df1ed7 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -1,5 +1,17 @@ # This file is machine-generated - editing it directly is not advised +[[AbstractFFTs]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "380e36c66edfa099cd90116b24c1ce8cafccac40" +uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" +version = "0.4.1" + +[[BackwardsLinalg]] +deps = ["LinearAlgebra", "Random", "Requires", "Zygote"] +path = "/home/andreas/.julia/dev/BackwardsLinalg" +uuid = "442b4e1a-8b9d-11e9-03b0-f14b31742153" +version = "0.1.0" + [[Base64]] uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" @@ -16,16 +28,28 @@ uuid = "9e28174c-4ba2-5203-b857-d8d62c4213ee" version = "0.8.10" [[BinaryProvider]] -deps = ["Libdl", "SHA"] +deps = ["Libdl", "Logging", "SHA"] git-tree-sha1 = "c7361ce8a2129f20b0e05a89f7070820cfed6648" uuid = "b99e7846-7c00-51b0-8f62-c81ae34c0232" -version = "0.5.4" +version = "0.5.6" [[CSTParser]] -deps = ["LibGit2", "Test", "Tokenize"] -git-tree-sha1 = "437c93bc191cd55957b3f8dee7794b6131997c56" +deps = ["Tokenize"] +git-tree-sha1 = "0ff80f68f55fcde2ed98d7b24d7abaf20727f3f8" uuid = "00ebfdb7-1f24-5e51-bd34-a7502290713f" -version = "0.5.2" +version = "0.6.1" + +[[Calculus]] +deps = ["Compat"] +git-tree-sha1 = "f60954495a7afcee4136f78d1d60350abd37a409" +uuid = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9" +version = "0.4.1" + +[[Combinatorics]] +deps = ["LinearAlgebra", "Polynomials", "Test"] +git-tree-sha1 = "50b3ae4d643dc27eaff69fb6be06ee094d5500c9" +uuid = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" +version = "0.7.0" [[CommonSubexpressions]] deps = ["Test"] @@ -39,6 +63,12 @@ git-tree-sha1 = "84aa74986c5b9b898b0d1acaf3258741ee64754f" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" version = "2.1.0" +[[Conda]] +deps = ["JSON", "VersionParsing"] +git-tree-sha1 = "9a11d428dcdc425072af4aea19ab1e8c3e01c032" +uuid = "8f4d0f93-b110-5947-807f-2305c1781a2d" +version = "1.3.0" + [[Crayons]] deps = ["Test"] git-tree-sha1 = "f621b8ef51fd2004c7cf157ea47f027fdeac5523" @@ -46,10 +76,10 @@ uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f" version = "4.0.0" [[DataStructures]] -deps = ["InteractiveUtils", "OrderedCollections", "Random", "Serialization", "Test"] -git-tree-sha1 = "ca971f03e146cf144a9e2f2ce59674f5bf0e8038" +deps = ["InteractiveUtils", "OrderedCollections"] +git-tree-sha1 = "0809951a1774dc724da22d26e4289bbaab77809a" uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" -version = "0.15.0" +version = "0.17.0" [[Dates]] deps = ["Printf"] @@ -59,6 +89,12 @@ uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" deps = ["Mmap"] uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" +[[DiffEqDiffTools]] +deps = ["LinearAlgebra", "SparseArrays", "StaticArrays"] +git-tree-sha1 = "b992345a39b4d9681342ae795a8dacc100730182" +uuid = "01453d9d-ee7c-5054-8395-0335cb756afa" +version = "0.14.0" + [[DiffResults]] deps = ["Compat", "StaticArrays"] git-tree-sha1 = "34a4a1e8be7bc99bc9c611b895b5baf37a80584c" @@ -75,6 +111,12 @@ version = "0.0.10" deps = ["Random", "Serialization", "Sockets"] uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" +[[FFTW]] +deps = ["AbstractFFTs", "BinaryProvider", "Compat", "Conda", "Libdl", "LinearAlgebra", "Reexport", "Test"] +git-tree-sha1 = "29cda58afbf62f35b1a094882ad6c745a47b2eaa" +uuid = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" +version = "0.2.4" + [[FillArrays]] deps = ["LinearAlgebra", "Random", "SparseArrays", "Test"] git-tree-sha1 = "9ab8f76758cbabba8d7f103c51dce7f73fcf8e92" @@ -89,21 +131,26 @@ version = "0.10.3" [[IRTools]] deps = ["InteractiveUtils", "MacroTools", "Test"] 
-git-tree-sha1 = "4d0c6647aa89b3678d03967ca11c8a5cdc83d2b5" +git-tree-sha1 = "41a26a81a043808a5eb7da44a2f9ef86ec7ed905" repo-rev = "master" repo-url = "https://github.com/MikeInnes/IRTools.jl.git" uuid = "7869d1d1-7146-5819-86e3-90919afe41df" -version = "0.2.1" +version = "0.2.2" [[InteractiveUtils]] deps = ["Markdown"] uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" +[[IterTools]] +git-tree-sha1 = "2ebe60d7343962966d1779a74a760f13217a6901" +uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e" +version = "1.2.0" + [[JSON]] -deps = ["Dates", "Distributed", "Mmap", "Sockets", "Test", "Unicode"] -git-tree-sha1 = "1f7a25b53ec67f5e9422f1f551ee216503f4a0fa" +deps = ["Dates", "Mmap", "Parsers", "Unicode"] +git-tree-sha1 = "b34d7cef7b337321e97d22242c3c2b91f476748e" uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" -version = "0.20.0" +version = "0.21.0" [[LibGit2]] uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" @@ -111,13 +158,11 @@ uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" [[Libdl]] uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" -[[LinalgBackwards]] -deps = ["LinearAlgebra", "Random", "Zygote"] -git-tree-sha1 = "fed67af4bbc3cb33f287bc2673366774ba4cf589" -repo-rev = "master" -repo-url = "https://github.com/GiggleLiu/LinalgBackwards.jl.git" -uuid = "8dad3b0d-1a4d-5faa-9471-8c404674cf7c" -version = "0.1.1" +[[LineSearches]] +deps = ["LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "Printf", "Test"] +git-tree-sha1 = "54eb90e8dbe745d617c78dee1d6ae95c7f6f5779" +uuid = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" +version = "7.0.1" [[LinearAlgebra]] deps = ["Libdl"] @@ -127,18 +172,30 @@ uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" [[MacroTools]] -deps = ["CSTParser", "Compat", "DataStructures", "Test"] -git-tree-sha1 = "daecd9e452f38297c686eba90dba2a6d5da52162" +deps = ["CSTParser", "Compat", "DataStructures", "Test", "Tokenize"] +git-tree-sha1 = "d6e9dedb8c92c3465575442da456aec15a89ff76" uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" -version = "0.5.0" +version = "0.5.1" [[Markdown]] deps = ["Base64"] uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" +[[Missings]] +deps = ["SparseArrays", "Test"] +git-tree-sha1 = "f0719736664b4358aa9ec173077d4285775f8007" +uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" +version = "0.4.1" + [[Mmap]] uuid = "a63ad114-7e13-5084-954f-fe012c677804" +[[NLSolversBase]] +deps = ["Calculus", "DiffEqDiffTools", "DiffResults", "Distributed", "ForwardDiff", "LinearAlgebra", "Random", "SparseArrays", "Test"] +git-tree-sha1 = "0c6f0e7f2178f78239cfb75310359eed10f2cacb" +uuid = "d41bc354-129a-5804-8e4c-c37616107c6c" +version = "7.3.1" + [[NNlib]] deps = ["Libdl", "LinearAlgebra", "Requires", "Statistics", "TimerOutputs"] git-tree-sha1 = "0c667371391fc6bb31f7f12f96a56a17098b3de8" @@ -152,38 +209,66 @@ uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" version = "0.3.2" [[OMEinsum]] -deps = ["IRTools", "PkgBenchmark", "TupleTools", "Zygote"] -git-tree-sha1 = "7689a992f08f32ec5db07067939bccbc7389e846" -repo-rev = "master" -repo-url = "https://github.com/under-Peter/OMEinsum.jl.git" +deps = ["Combinatorics", "IRTools", "PkgBenchmark", "TensorOperations", "TupleTools", "Zygote"] +git-tree-sha1 = "4f315c18035bd8d9cf38abc522302d9077f4cac3" uuid = "ebe7aa44-baf0-506c-a96f-8464559b3922" version = "0.1.0" +[[Optim]] +deps = ["Calculus", "DiffEqDiffTools", "FillArrays", "ForwardDiff", "LineSearches", "LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "PositiveFactorizations", "Printf", "Random", "SparseArrays", "StatsBase"] +git-tree-sha1 = 
"05efd63b639afdd3425909cd3af73fe9c1373cf3" +uuid = "429524aa-4258-5aef-a3af-852621145aeb" +version = "0.19.1" + [[OrderedCollections]] deps = ["Random", "Serialization", "Test"] git-tree-sha1 = "c4c13474d23c60d20a67b217f1d7f22a40edf8f1" uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" version = "1.1.0" +[[Parameters]] +deps = ["Markdown", "OrderedCollections", "REPL", "Test"] +git-tree-sha1 = "70bdbfb2bceabb15345c0b54be4544813b3444e4" +uuid = "d96e819e-fc66-5662-9728-84c9c7592b0a" +version = "0.10.3" + +[[Parsers]] +deps = ["Dates", "Test"] +git-tree-sha1 = "db2b35dedab3c0e46dc15996d170af07a5ab91c9" +uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" +version = "0.3.6" + [[Pkg]] deps = ["Dates", "LibGit2", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"] uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" [[PkgBenchmark]] -deps = ["BenchmarkTools", "Dates", "InteractiveUtils", "JSON", "LibGit2", "Pkg", "Printf", "ProgressMeter", "Random", "Statistics", "Test"] -git-tree-sha1 = "cf641fb115ca4c97ce0a20bdddc3ba973724c61e" +deps = ["BenchmarkTools", "Dates", "InteractiveUtils", "JSON", "LibGit2", "Pkg", "Printf", "ProgressMeter"] +git-tree-sha1 = "6ec2e9ab9f35005bf73704ca48ccda0c963b0a68" uuid = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d" -version = "0.2.1" +version = "0.2.2" + +[[Polynomials]] +deps = ["LinearAlgebra", "SparseArrays", "Test"] +git-tree-sha1 = "62142bd65d3f8aeb2226ec64dd8493349147df94" +uuid = "f27b6e38-b328-58d1-80ce-0feddd5e7a45" +version = "0.5.2" + +[[PositiveFactorizations]] +deps = ["LinearAlgebra", "Test"] +git-tree-sha1 = "957c3dd7c33895469760ce873082fbb6b3620641" +uuid = "85a6dd25-e78a-55b7-8502-1745935b8125" +version = "0.2.2" [[Printf]] deps = ["Unicode"] uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" [[ProgressMeter]] -deps = ["Distributed", "Printf", "Random", "Test"] -git-tree-sha1 = "48058bc11607676e5bbc0b974af79106c6200787" +deps = ["Distributed", "Printf"] +git-tree-sha1 = "0f08e0e74e5b160ca20d3962a2620038b75881c7" uuid = "92933f4c-e287-5a05-a399-4b506db050ca" -version = "0.9.0" +version = "1.0.0" [[REPL]] deps = ["InteractiveUtils", "Markdown", "Sockets"] @@ -193,6 +278,12 @@ uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" deps = ["Serialization"] uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +[[Reexport]] +deps = ["Pkg"] +git-tree-sha1 = "7b1d07f411bc8ddb7977ec7f377b97b158514fe0" +uuid = "189a3867-3050-52da-a836-e630ba90ab69" +version = "0.2.0" + [[Requires]] deps = ["Test"] git-tree-sha1 = "f6fbf4ba64d295e146e49e021207993b6b48c7d1" @@ -212,6 +303,12 @@ uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" [[Sockets]] uuid = "6462fe0b-24de-5631-8697-dd941f90decc" +[[SortingAlgorithms]] +deps = ["DataStructures", "Random", "Test"] +git-tree-sha1 = "03f5898c9959f8115e30bc7226ada7d0df554ddd" +uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" +version = "0.3.1" + [[SparseArrays]] deps = ["LinearAlgebra", "Random"] uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" @@ -232,6 +329,24 @@ version = "0.11.0" deps = ["LinearAlgebra", "SparseArrays"] uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" +[[StatsBase]] +deps = ["DataStructures", "LinearAlgebra", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics"] +git-tree-sha1 = "2b6ca97be7ddfad5d9f16a13fe277d29f3d11c23" +uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" +version = "0.31.0" + +[[Strided]] +deps = ["LinearAlgebra", "TupleTools"] +git-tree-sha1 = "b929f81ca5e78362a4c1e935bb5e7bee27dc2279" +uuid = "5e0ebb24-38b0-5f93-81fe-25c709ecae67" +version = "0.3.1" + +[[TensorOperations]] +deps = ["LinearAlgebra", "Strided", 
"TupleTools"] +git-tree-sha1 = "2804781ab5e899a0e4c61a4d655e4dea88553710" +uuid = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" +version = "1.1.0" + [[Test]] deps = ["Distributed", "InteractiveUtils", "Logging", "Random"] uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" @@ -243,10 +358,9 @@ uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" version = "0.5.0" [[Tokenize]] -deps = ["Printf", "Test"] -git-tree-sha1 = "3e83f60b74911d3042d3550884ca2776386a02b8" +git-tree-sha1 = "c8a8b00ae44a94950814ff77850470711a360225" uuid = "0796e94c-ce3b-5d07-9a54-7f471281c624" -version = "0.5.3" +version = "0.5.5" [[TupleTools]] deps = ["Random", "Test"] @@ -267,10 +381,16 @@ uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" [[Unicode]] uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" +[[VersionParsing]] +deps = ["Compat"] +git-tree-sha1 = "c9d5aa108588b978bd859554660c8a5c4f2f7669" +uuid = "81def892-9a0e-5fdd-b105-ffc91e053289" +version = "1.1.3" + [[Zygote]] -deps = ["DiffRules", "FillArrays", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NNlib", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics"] -git-tree-sha1 = "4b9c0799b296c73b60397ed167b6455c31820d06" +deps = ["DiffRules", "FFTW", "FillArrays", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NNlib", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics"] +git-tree-sha1 = "3e024f0c5e23c37206418fac6343c149604124d0" repo-rev = "master" repo-url = "https://github.com/FluxML/Zygote.jl.git" uuid = "e88e6eb3-aa80-5325-afca-941959d7151f" -version = "0.3.1" +version = "0.3.2" diff --git a/Project.toml b/Project.toml index 4f7c4b8..f3d9fb6 100644 --- a/Project.toml +++ b/Project.toml @@ -4,9 +4,15 @@ authors = ["Andreas Peter "] version = "0.1.0" [deps] +BackwardsLinalg = "442b4e1a-8b9d-11e9-03b0-f14b31742153" IRTools = "7869d1d1-7146-5819-86e3-90919afe41df" -LinalgBackwards = "8dad3b0d-1a4d-5faa-9471-8c404674cf7c" +IterTools = "c8e1da08-722c-5040-9ed9-7db0dc04731e" +LineSearches = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" +LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" OMEinsum = "ebe7aa44-baf0-506c-a96f-8464559b3922" +Optim = "429524aa-4258-5aef-a3af-852621145aeb" +ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" TupleTools = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" diff --git a/src/TensorNetworkAD.jl b/src/TensorNetworkAD.jl index 69712bd..774e5c5 100644 --- a/src/TensorNetworkAD.jl +++ b/src/TensorNetworkAD.jl @@ -1,11 +1,17 @@ module TensorNetworkAD -using Zygote, LinalgBackwards +using Zygote, BackwardsLinalg using OMEinsum export trg, num_grad +export ctmrg +export optimiseipeps +export heisenberghamiltonian, isinghamiltonian, isingtensor -include("einsum.jl") include("trg.jl") +include("fixedpoint.jl") +include("ctmrg.jl") include("autodiff.jl") +include("variationalipeps.jl") +include("exampletensors.jl") end # module diff --git a/src/autodiff.jl b/src/autodiff.jl index caa1378..7b0c4d6 100644 --- a/src/autodiff.jl +++ b/src/autodiff.jl @@ -1,6 +1,8 @@ using Zygote -@Zygote.nograd contractionindices +@Zygote.adjoint function Base.typed_hvcat(::Type{T}, rows::Tuple{Vararg{Int}}, xs::S...) where {T,S} + Base.typed_hvcat(T,rows, xs...), ȳ -> (nothing, nothing, permutedims(ȳ)...) +end @doc raw" num_grad(f, K::Real; [δ = 1e-5]) @@ -15,6 +17,7 @@ return the numerical gradient of `f` for each element of `K`. 
" function num_grad(f, a::AbstractArray; δ::Real = 1e-5) map(CartesianIndices(a)) do i - num_grad(a[i], x -> (ac = copy(a); ac[i] = x; f(ac)), δ) + foo = x -> (ac = copy(a); ac[i] = x; f(ac)) + num_grad(foo, a[i], δ = δ) end end diff --git a/src/ctmrg.jl b/src/ctmrg.jl new file mode 100644 index 0000000..5d4b4f7 --- /dev/null +++ b/src/ctmrg.jl @@ -0,0 +1,90 @@ +using BackwardsLinalg, OMEinsum +using LinearAlgebra: normalize, norm, diag +using Random +using IterTools: iterated +using Base.Iterators: take, drop + +function initializec(a, χ, randinit) + c = zeros(eltype(a), χ, χ) + if randinit + rand!(c) + c += adjoint(c) + else + cinit = einsum("ijkl -> ij", (a,)) + foreach(CartesianIndices(cinit)) do i + i in CartesianIndices(c) && (c[i] = cinit[i]) + end + end + return c +end + +function initializet(a, χ, randinit) + t = zeros(eltype(a), χ, size(a,1), χ) + if randinit + rand!(t) + t += permutedims(conj(t), (3,2,1)) + else + tinit = einsum("ijkl -> ijl", (a,)) + foreach(CartesianIndices(tinit)) do i + i in CartesianIndices(t) && (t[i] = tinit[i]) + end + end + return t +end + +@Zygote.nograd initializec, initializet + +function ctmrg(a, χ, tol, maxit::Integer, randinit = false) + d = size(a,1) + # initialize + cinit = initializec(a, χ, randinit) + tinit = initializet(a, χ, randinit) + oldvals = fill(Inf, χ*d) + + stopfun = StopFunction(oldvals, -1, tol, maxit) + c, t, = fixedpoint(ctmrgstep, (cinit, tinit, oldvals), (a, χ, d), stopfun) + return c, t +end + +function ctmrgstep((c,t,vals), (a, χ, d)) + # grow + # cp = einsum("ad,iba,dcl,jkcb -> ijlk", (c, t, t, a)) + ct = einsum("ad,iba -> dib", (c,t)) + ctt = einsum("dib,dcl -> bcil", (ct,t)) + ctta = einsum("bcil, jkcb -> ijlk", (ctt,a)) + cp = ctta + tp = einsum("iam,jkla -> ijklm", (t,a)) + + # renormalize + cpmat = reshape(cp, χ*d, χ*d) + cpmat += adjoint(cpmat) + u, s, v = svd(cpmat) + z = reshape(u[:, 1:χ], χ, d, χ) + + # c = einsum("abcd,abi,cdj -> ij", (cp, conj(z), z)) + cz = einsum("abcd,abi -> cdi", (cp, conj(z))) + c = einsum("cdi,cdj -> ij", (cz, z)) + + # t = einsum("abjcd,abi,dck -> ijk", (tp, conj(z), z)) + tz = einsum("abjcd,abi -> ijcd", (tp, conj(z))) + t = einsum("ijcd,dck -> ijk", (tz,z)) + + vals = s ./ s[1] + + + # symmetrize + c += adjoint(c) + t += permutedims(conj(t), (3,2,1)) + + #gauge fix + c *= sign(c[1]) + signs = sign.(t[:,2,1]) + # t = einsum("i,ijk,k -> ijk", (signs, t, signs)) + t = t .* reshape(signs,:,1,1) .* reshape(signs,1,1,:) + + # normalize + c /= norm(c) + t /= norm(t) + + return c, t, vals +end diff --git a/src/einsum.jl b/src/einsum.jl deleted file mode 100644 index b4868f5..0000000 --- a/src/einsum.jl +++ /dev/null @@ -1,39 +0,0 @@ -using OMEinsum -using TupleTools -@doc raw" - contractionindices(ixs, iy) -calculate the indices of the intermediate tensor -resulting from contracting the first two indices of -`ixs`. -returns both the indices of the contraction and the intermediate -result indices. - -# Example - -Product of three matrices -```julia -julia> contractionindices(((1,2),(2,3),(3,4)), (1,4)) -(((1, 2), (2, 3)), (1, 3)) -``` -" -function contractionindices(ixs::NTuple{N, T where T}, iy) where N - N <= 2 && return (ixs, iy) - ix12 = unique(vcat(collect(ixs[1]), collect(ixs[2]))) - allothers = vcat(map(collect,ixs[3:end])..., collect(iy)) - ((ixs[1], ixs[2]), Tuple(i for i in ix12 if i in allothers)) -end - -@doc raw" - meinsum(ixs, xs, iy) -like `einsum(ixs,xs,iy)` but contracting pairwise from -left to right. 
-" -function meinsum(ixs, xs, iy) - ixstmp, iytmp = contractionindices(ixs, iy) - xstmp = Tuple(xs[i] for i in 1:length(ixstmp)) - x = einsum(ixstmp, xstmp, iytmp) - length(xs) <= 2 && return x - nixs = (iytmp, map(i -> ixs[i], (3:length(ixs)...,))...) - nxs = (x, map(i -> xs[i], (3:length(ixs)...,))...) - return meinsum(nixs, nxs, iy) -end diff --git a/src/exampletensors.jl b/src/exampletensors.jl new file mode 100644 index 0000000..f1250e5 --- /dev/null +++ b/src/exampletensors.jl @@ -0,0 +1,45 @@ +# tensor for classical 2-d model +const isingβc = log(1+sqrt(2))/2 + +function tensorfromclassical(ham::Matrix) + wboltzmann = exp.(ham) + q = sqrt(wboltzmann) + einsum("ij,ik,il,im -> jklm", (q,q,q,q)) +end + +function isingtensor(β) + a = reshape(Float64[1 0 0 0; 0 0 0 0; 0 0 0 0; 0 0 0 1], 2,2,2,2) + cβ, sβ = sqrt(cosh(β)), sqrt(sinh(β)) + q = 1/sqrt(2) * [cβ+sβ cβ-sβ; cβ-sβ cβ+sβ] + einsum("abcd,ai,bj,ck,dl -> ijkl", (a,q,q,q,q)) +end + +function isingmagtensor(β) + a = reshape([1 0 0 0; 0 0 0 0; 0 0 0 0; 0 0 0 -1] , 2,2,2,2) + cβ, sβ = sqrt(cosh(β)), sqrt(sinh(β)) + q = 1/sqrt(2) * [cβ+sβ cβ-sβ; cβ-sβ cβ+sβ] + einsum("abcd,ai,bj,ck,dl -> ijkl", (a,q,q,q,q)) +end + +function magnetisationofβ(β, χ) + a = isingtensor(β) + m = isingmagtensor(β) + c, t, = ctmrg(a, χ, 1e-6, 100, true) + ctc = einsum("ia,ajb,bk -> ijk", (c,t,c)) + env = einsum("alc,ckd,bjd,bia -> ijkl",(ctc,t,ctc,t)) + mag = einsum("ijkl,ijkl ->", (env,m))[] + norm = einsum("ijkl,ijkl ->", (env,a))[] + + return abs(mag/norm) +end + +function eoeofβ(β,χ) + a = isingtensor(β) + m = isingmagtensor(β) + c, t, = ctmrg(a, χ, 1e-6, 100) + d = diag(c) + d = d ./ norm(d,1) + return -sum(d .* log2.(d)) +end + +magofβ(β) = β > isingβc ? (1-sinh(2*β)^-4)^(1/8) : 0. diff --git a/src/fixedpoint.jl b/src/fixedpoint.jl new file mode 100644 index 0000000..00162d3 --- /dev/null +++ b/src/fixedpoint.jl @@ -0,0 +1,61 @@ +using Base.Iterators: drop, take +using IterTools: iterated, imap + +function fixedpoint(f, guess, init, stopfun) + for state in iterated(x -> f(x,init), guess) + stopfun(state) && return state + end +end + +mutable struct StopFunction{T,S} + oldvals::T + counter::Int + tol::S + maxit::Int +end + + +@Zygote.nograd StopFunction +function (st::StopFunction)(state) + st.counter += 1 + st.counter > st.maxit && return true + + vals = state[3] + diff = norm(vals - st.oldvals) + diff <= st.tol && return true + st.oldvals = vals + + return false +end + +function fixedpointbackward(next, (c,t,vals), (a, χ, d)) + _, back = Zygote.forward(next,(c,t,vals),(a,χ,d)) + back1 = x -> back(x)[1] + back2 = x -> back(x)[2] + + function backΔ(Δ) + grad = back2(Δ)[1] + for g in take(imap(back2,drop(iterated(back1, Δ),1)),100) + grad .+= g[1] + ng = norm(g[1]) + if ng < 1e-7 + break + elseif ng > 10 + println("backprop not converging") + # try to minimise damage by scaling to small + grad ./= norm(grad) + grad .*= 1e-4 + break + end + end + (grad, nothing, nothing) + end + return backΔ +end + +fixedpointAD(f, g, n, sf) = fixedpoint(f, g, n ,sf) + +@Zygote.adjoint function fixedpointAD(f, guess, n, stopfun) + r = fixedpoint(f, guess, n, stopfun) + return r, Δ -> (nothing, nothing, fixedpointbackward(f, r, n)(Δ), nothing) +end diff --git a/src/nctmrg.jl b/src/nctmrg.jl new file mode 100644 index 0000000..5192dae --- /dev/null +++ b/src/nctmrg.jl @@ -0,0 +1,327 @@ +using Base.Iterators: drop, take +using IterTools: iterated, imap + +function fixedpoint(f, guess, init, stopfun) + for state in iterated(x -> f(x,init), guess) + stopfun(state) && return state + 
end +end + +mutable struct StopFunction{T,S} + oldvals::T + counter::Int + tol::S + maxit::Int +end + + +function (st::StopFunction)(state) + st.counter += 1 + st.counter > st.maxit && return true + + # vals = state[3] + # diff = norm(vals - st.oldvals) + # diff <= st.tol && return true + # st.oldvals = vals + + return false +end + +#= + Following Corboz, Rice and Troyer in **Competing states in the t-J model: uniform d-wave state versus stripe state** + p.7 +=# +using OMEinsum +using BackwardsLinalg +using LinearAlgebra + +function initializec(a, χ, randinit) + c = zeros(eltype(a), χ, χ) + if randinit + rand!(c) + c += adjoint(c) + else + cinit = ein"ijkl -> ij"(a) + foreach(CartesianIndices(cinit)) do i + i in CartesianIndices(c) && (c[i] = cinit[i]) + end + end + return c +end + +function initializet(a, χ, randinit) + t = zeros(eltype(a), χ, size(a,1), χ) + if randinit + rand!(t) + t += permutedims(conj(t), (3,2,1)) + else + tinit = ein"ijkl -> ijl"(a) + foreach(CartesianIndices(tinit)) do i + i in CartesianIndices(t) && (t[i] = tinit[i]) + end + end + return t +end + +function initializeunitcell(initfun, as, χ, randinit, T, lvecs) + uc = UnitCell(Dict{Tuple{Int,Int},T}(), lvecs) + foreach(u -> uc[u...] = initfun(as[u], χ, randinit), keys(as)) + return uc +end + +function initializeunitcells(A, χ, randinit = false) + lvecs = A.lvecs + CT = Array{eltype(eltype(A)),2} + TT = Array{eltype(eltype(A)),3} + + ats = A.tensors + C1 = initializeunitcell(initializec, ats, χ, randinit, CT, lvecs) + T1 = initializeunitcell(initializet, ats, χ, randinit, TT, lvecs) + + # rotate counter-clockwise + ats = Dict(k => ein"ijkl -> lijk"(v) for (k,v) in ats) + C2 = initializeunitcell(initializec, ats, χ, randinit, CT, lvecs) + T2 = initializeunitcell(initializet, ats, χ, randinit, TT, lvecs) + + # rotate counter-clockwise + ats = Dict(k => ein"ijkl -> lijk"(v) for (k,v) in ats) + C3 = initializeunitcell(initializec, ats, χ, randinit, CT, lvecs) + T3 = initializeunitcell(initializet, ats, χ, randinit, TT, lvecs) + + # rotate counter-clockwise + ats = Dict(k => ein"ijkl -> lijk"(v) for (k,v) in ats) + C4 = initializeunitcell(initializec, ats, χ, randinit, CT, lvecs) + T4 = initializeunitcell(initializet, ats, χ, randinit, TT, lvecs) + + return C1, C2, C3, C4, T1, T2, T3, T4 +end + +function nctmrg(a, χ, randinit = false) + d = size(a[1,1],1) + C1234T1234 = initializeunitcells(a, χ, randinit) + + oldvals = fill(Inf, 4χ*d) + tol = 0 + maxit = 10 + + stopfun = StopFunction(oldvals, -1, tol, maxit) + C1234T1234 = nctmrgstep((C1234T1234..., oldvals), (a, χ, d))[1:end-1] + #C1234T1234 = fixedpoint(nctmrgstep, (C1234T1234..., oldvals), (a, χ, d), stopfun)[1:end-1] + return C1234T1234 +end + +function nctmrgstep((C1, C2, C3, C4, T1, T2, T3, T4, oldvals), (a, χ, d)) + C4, T4, C1 = leftmove(C1, C2, C3, C4, T1, T2, T3, T4, a, χ) + C1, T1, C2 = upmove(C1, C2, C3, C4, T1, T2, T3, T4, a, χ) + C2, T2, C3 = rightmove(C1, C2, C3, C4, T1, T2, T3, T4, a, χ) + C3, T3, C4 = downmove(C1, C2, C3, C4, T1, T2, T3, T4, a, χ) + + return (C1, C2, C3, C4, T1, T2, T3, T4, oldvals) +end + +function leftmove(C1,C2,C3,C4,T1,T2,T3,T4,a,χ) + Ps, Pts = similar.((T4,T4)) + C4p, T4p, C1p = similar.((C4, T4, C1)) + + foreach(keys(a.tensors)) do k + Ps[k...], Pts[k...] = leftisommetries(C1,C2,C3,C4,T1,T2,T3,T4,a,χ,k) + end + foreach(keys(a.tensors)) do k + C4p[k...], T4p[k...], C1p[k...] 
= leftmove(C1,T1,T4,a,C4,T3,Ps,Pts, k) + end + + return C4p, T4p, C1p +end + +function leftmove(C1,T1,T4,A,C4,T3,Ps,Pts, (x,y)) + @ein C1[1,2] := C1[x,y][-1,-2] * T1[x+1,y][-2,-3,2] * Pts[x,y][-1,-3,1] + @ein T4[1,2,3] := T4[x,y][-1,-2,-3] * A[x+1,y][-4,2,-5,-2] * + Ps[x,y-1][-3,-5,3] * Pts[x,y][-1,-4,1] + @ein C4[1,2] := C4[x,y][-1,-2] * T3[x+1,y][1,-3,-1] * Ps[x,y-1][-2,-3,2] + map(x -> x / norm(x), (C4, T4, C1)) +end + +function leftisommetries(C1,C2,C3,C4,T1,T2,T3,T4,A,χ,(x,y)) + upperhalf, lowerhalf = horizontalcut(C1,C2,C3,C4,T1,T2,T3,T4,A,(x,y)) + d = size(A[1,1],1) + + lupmat, _ = lq(reshape(upperhalf, d*χ, d*χ)) + ldnmat, _ = lq(reshape(lowerhalf, d*χ, d*χ)) + lup = reshape(lupmat, χ,d,χ*d) + ldn = reshape(ldnmat, χ,d,χ*d) + + @ein lupldn[1,2] := lupmat[-1,1] * ldnmat[-1,2] + + u, s, vd = svd(lupldn) + cutoff = min(χ, length(s)) + u = u[:,1:cutoff] + vd = vd[1:cutoff, :] + + sqrts = sqrt.(pinv.(view(s,1:cutoff))) + + @ein PT[1,2,3] := ldn[1,2,-1] * conj(vd)[3,-1] * sqrts[3] + @ein P[1,2,3] := lup[1,2,-1] * conj(u)[ -1,3] * sqrts[3] + + return P, PT +end + +function rightmove(C1,C2,C3,C4,T1,T2,T3,T4,a,χ) + Ps, Pts = similar.((T2,T2)) + C2p, T2p, C3p = similar.((C2, T2, C3)) + + foreach(keys(a.tensors)) do k + Ps[k...], Pts[k...] = rightisommetries(C1,C2,C3,C4,T1,T2,T3,T4,a,χ,k) + end + foreach(keys(a.tensors)) do k + C2p[k...], T2p[k...], C3p[k...] = rightmove(C2,T1,T2,a,C3,T3,Ps,Pts, k) + end + + return C2p, T2p, C3p +end + +function rightmove(C2,T1,T2,A,C3,T3,Ps,Pts, (x,y)) + @ein C2[1,2] := C2[x,y][-1,-2] * T1[x-1,y][1,-3,-1] * Pts[x,y][-2,-3,2] + @ein T2[1,2,3] := T2[x,y][-1,-2,-3] * A[x-1,y][-5,-2,-4,2] * + Ps[x,y-1][-1,-4,1] * Pts[x,y][-3,-5,3] + @ein C3[1,2] := C3[x,y][-1,-2] * T3[x-1,y][-2,-3,2] * Ps[x,y-1][-1,-3,1] + map(x -> x / norm(x), (C2, T2, C3)) +end + +function rightisommetries(C1,C2,C3,C4,T1,T2,T3,T4,A,χ,(x,y)) + upperhalf, lowerhalf = horizontalcut(C1,C2,C3,C4,T1,T2,T3,T4,A,(x,y)) + d = size(A[1,1],1) + + _, rupmat = qr(reshape(upperhalf, d*χ, d*χ)) + _, rdnmat = qr(reshape(lowerhalf, d*χ, d*χ)) + rup = reshape(rupmat, χ*d, d, χ) + rdn = reshape(rdnmat, χ*d, d, χ) + + @ein ruprdn[1,2] := rupmat[1,-1] * rdnmat[2,-1] + u, s, vd = svd(ruprdn) + cutoff = min(χ, length(s)) + u = u[:,1:cutoff] + vd = vd[1:cutoff, :] + + sqrts = sqrt.(pinv.(view(s,1:cutoff))) + + @ein PT[1,2,3] := rdn[-1,2,1] * conj(vd)[3,-1] * sqrts[3] + @ein P[1,2,3] := rup[-1,2,1] * conj(u)[-1,3] * sqrts[3] + + return P, PT +end + +function horizontalcut(C1,C2,C3,C4,T1,T2,T3,T4,A,(x,y)) + @ein upperhalf[1,2,3,4] := C1[x ,y ][-1,-2] * T4[x ,y+1][1,-3,-1] * + T1[x+1,y ][-2,-4,-5] * A[x+1,y+1][2,-6,-4,-3]* + T1[x+2,y ][-5,-7,-8] * A[x+2,y+1][3,-9,-7,-6] * + C2[x+3,y ][-8,-10] * T2[x+3,y+1][-10,-9,4] + + @ein lowerhalf[1,2,3,4] := C3[x+3,y+3][-1,-2] * T2[x+3,y+2][4,-3,-1] * + T3[x+2,y+3][-2,-4,-5] * A[x+2,y+2][-4,-3,3,-6] * + T3[x+1,y+3][-5,-7,-8] * A[x+1,y+2][-7,-6,2,-9] * + C4[x ,y+3][-8,-10] * T4[x ,y+2][-10,-9,1] + return upperhalf, lowerhalf +end + +function downmove(C1,C2,C3,C4,T1,T2,T3,T4,a,χ) + Ps, Pts = similar.((T2,T2)) + C3p, T3p, C4p = similar.((C2, T2, C3)) + + foreach(keys(a.tensors)) do k + Ps[k...], Pts[k...] = downisommetries(C1,C2,C3,C4,T1,T2,T3,T4,a,χ,k) + end + foreach(keys(a.tensors)) do k + C3p[k...], T3p[k...], C4p[k...] 
= downmove(C3,T2,T3,a,C4,T4,Ps,Pts, k) + end + + return C3p, T3p, C4p +end + +function downisommetries(C1,C2,C3,C4,T1,T2,T3,T4,A,χ,(x,y)) + lefthalf, righthalf = verticalcut(C1,C2,C3,C4,T1,T2,T3,T4,A,(x,y)) + d = size(A[1,1],1) + + llmat, _ = lq(reshape(lefthalf, χ*d, χ*d)) + lrmat, _ = lq(reshape(righthalf, χ*d, χ*d)) + + ll = reshape(llmat, χ,d,χ*d) + lr = reshape(lrmat, χ,d,χ*d) + + @ein lllr[1,2] := llmat[-1,1] * lrmat[-1,2] + u, s, vd = svd(lllr) + cutoff = min(χ, length(s)) + u = u[:,1:cutoff] + vd = vd[1:cutoff, :] + sqrts = sqrt.(pinv.(view(s,1:cutoff))) + + @ein PT[1,2,3] := lr[1,2,-1] * conj(vd)[3,-1] * sqrts[3] + @ein P[1,2,3] := ll[1,2,-1] * conj(u)[-1,3] * sqrts[3] + + return P, PT +end + +function downmove(C3,T2,T3,A,C4,T4,Ps,Pts,(x,y)) + @ein C3[1,2] := C3[x,y][-1,-2] * T2[x,y-1][1,-3,-1] * Pts[x,y][-2,-3,2] + @ein T3[1,2,3] := T3[x,y][-1,-2,-3] * A[x-1,y][-2,-4,2,-5] * + Ps[x,y-1][-3,-5,3] * Pts[x,y][-1,-4,1] + @ein C4[1,2] := C4[x,y][-1,-2] * T4[x-1,y][-2,-3,2] * Ps[x,y-1][-1,-3,1] + + map(x -> x / norm(x), (C3, T3, C4)) +end + +function upmove(C1,C2,C3,C4,T1,T2,T3,T4,a,χ) + Ps, Pts = similar.((T1,T1)) + C1p, T1p, C2p = similar.((C1, T1, C2)) + + foreach(keys(a.tensors)) do k + Ps[k...], Pts[k...] = upisommetries(C1,C2,C3,C4,T1,T2,T3,T4,a,χ,k) + end + foreach(keys(a.tensors)) do k + C1p[k...], T1p[k...], C2p[k...] = upmove(C1,T4,T1,a,C2,T2,Ps,Pts, k) + end + + return C1p, T1p, C2p +end + +function upisommetries(C1,C2,C3,C4,T1,T2,T3,T4,A,χ,(x,y)) + lefthalf, righthalf = verticalcut(C1,C2,C3,C4,T1,T2,T3,T4,A,(x,y)) + d = size(A[1,1],1) + + _, rlmat = qr(reshape(lefthalf, χ*d, χ*d)) + _, rrmat = qr(reshape(righthalf, χ*d, χ*d)) + + rl = reshape(rlmat, χ*d, d, χ) + rr = reshape(rrmat, χ*d, d, χ) + + @ein rlrr[1,2] := rl[1,-1,-2] * rr[2,-1,-2] + u, s, vd = svd(rlrr) + cutoff = min(χ, length(s)) + u, vd = u[:,1:cutoff], vd[1:cutoff, :] + sqrts = sqrt.(pinv.(view(s,1:cutoff))) + + @ein PT[1,2,3] := rl[-1,2,1] * conj(vd)[3,-1] * sqrts[3] + @ein P[1,2,3] := rr[-1,2,1] * conj(u)[-1,3] * sqrts[3] + + P, PT +end + +function upmove(C1,T4,T1,A,C2,T2,Ps,Pts, (x,y)) + @ein C1[1,2] := C1[x,y][-1,-2] * T4[x,y][1,-3,-1] * Pts[x,y][-2,-3,2] + @ein T1[1,2,3] := T1[x,y][-1,-2,-3] * A[x,y][2,-5,-2,-4] * + Ps[x,y-1][-1,-4,1] * Pts[x,y][-3,-5,3] + @ein C2[1,2] := C2[x,y][-1,-2] * T2[x,y][-2,-3,2] * Ps[x,y-1][-1,-3,1] + + map(x -> x / norm(x), (C1, T1, C2)) +end + +function verticalcut(C1,C2,C3,C4,T1,T2,T3,T4,A,(x,y)) + @ein lefthalf[1,2,3,4] := C4[x ,y+3][-1,-2] * T3[x+1,y+3][1,-3,-1] * + T4[x ,y+2][-2,-4,-5] * A[x+1,y+2][-3,2,-6,-4] * + T4[x ,y+1][-5,-7,-8] * A[x+1,y+1][-6,3,-9,-7] * + C1[x ,y ][-8,-10] * T1[x+1,y ][-10,-9,4] + @ein righthalf[1,2,3,4] := C2[x+3,y ][-1,-2] * T1[x+2,y ][4,-3,-1] * + T2[x+3,y+1][-2,-4,-5] * A[x+2,y+1][-6,-4,-3,3] * + T2[x+3,y+2][-5,-7,-8] * A[x+2,y+2][-9,-7,-6,2] * + C3[x+3,y+3][-8,-10] * T3[x ,y+3][-10,-9,1] + return lefthalf, righthalf +end diff --git a/src/trg.jl b/src/trg.jl index 7559490..bed2e3d 100644 --- a/src/trg.jl +++ b/src/trg.jl @@ -1,40 +1,49 @@ # using OMEinsum -using LinalgBackwards +using BackwardsLinalg -function trg(k, χ, niter) - D = 2 - inds = 1:D - M = [sqrt(cosh(k)) sqrt(sinh(k)) ; sqrt(cosh(k)) -sqrt(sinh(k))] - T = meinsum(((-1,1),(-1,2),(-1,3),(-1,4)), (M,M,M,M), (1,2,3,4)) +@doc raw" + trg(a, χ, niter) +return the partition-function of a two-dimensional system of size `2^niter` +described by the tensor `a` calculated via the tensor renormalization group +algorithm. 
+`a` is a rank-4 tensor with the following indices: - lnZ = zero(k) + |1 + 4--[a]--2 + 3| +" +function trg(a::AbstractArray{T,4}, χ, niter; tol::Float64 = 1e-16) where T + lnZ = zero(T) for n in 1:niter - maxval = maximum(T) - T /= maxval - lnZ += 2^(niter-n+1)*log(maxval) + maxval = maximum(abs.(a)) + a /= maxval + lnZ += 2.0^(1-n)*log(maxval) - d2 = size(T,1)^2 - Ma = reshape(meinsum(((1,2,3,4),), (T,),(3,2,1,4)), d2, d2) - Mb = reshape(meinsum(((1,2,3,4),), (T,),(4,3,2,1)), d2, d2) - s1, s3 = trg_svd(Ma, χ) - s2, s4 = trg_svd(Mb, χ) + dr_ul = einsum("urdl -> drul", (a,)) + ld_ru = einsum("urdl -> ldru", (a,)) + dr, ul = trg_svd(dr_ul, χ, tol) + ld, ru = trg_svd(ld_ru, χ, tol) - T = meinsum(((-1,-2,1), (-2,-3,2), (3,-3,-4), (4,-4,-1)), (s1,s2,s3,s4),(1,2,3,4)) + # a = einsum("npu,por,dom,lmn -> urdl", (dr,ld,ul,ru)) + drld = einsum("npu,por -> nuor", (dr, ld)) + drldul = einsum("nuor,dom -> nurdm", (drld, ul)) + a = einsum("nurdm, lmn -> urdl", (drldul, ru)) end - trace = meinsum(((1,2,1,2),), (T,), ())[1] - lnZ += log(trace) + trace = einsum("ijij -> ", (a,))[] + lnZ += log(trace)/2.0^niter return lnZ end -function trg_svd(Ma, Dmax; tol::Float64=1e-12) - U, S, V = svd(Ma) - Dmax = min(searchsortedfirst(S, tol, rev=true), Dmax) - D = isqrt(size(Ma, 1)) - FS = S[1:Dmax] +function trg_svd(t, dmax, tol) + d1, d2, d3, d4 = size(t) + tmat = reshape(t, d1*d2, d3*d4) + u, s, v = svd(tmat) + dmax = min(searchsortedfirst(s, tol, rev=true), dmax, length(s)) + FS = s[1:dmax] sqrtFSp = sqrt.(FS) - S1 = reshape(meinsum(((1,2),(2,)), (U[:,1:Dmax], sqrtFSp), (1,2)), (D, D, Dmax)) - S3 = reshape(meinsum(((1,2),(1,)), (copy(V')[1:Dmax,:], sqrtFSp), (1,2)), (Dmax, D, D)) + u = reshape(einsum("ij,j -> ij", (u[:,1:dmax], sqrtFSp)), (d1, d2, dmax)) + v = reshape(einsum("ij,i -> ij", (copy(v')[1:dmax,:], sqrtFSp)), (dmax, d3, d4)) - S1, S3 + return u, v end diff --git a/src/unitcell.jl b/src/unitcell.jl new file mode 100644 index 0000000..da81c50 --- /dev/null +++ b/src/unitcell.jl @@ -0,0 +1,45 @@ +using StaticArrays +using Parameters + +""" + UnitCell{TA} +holds: + - tensors of type `TA` in a dictionary (field `tensors`) + - lattice vectors (field `lvecs`) + - transformation matrix to map cartesian coordinates into the unitcell (field `m`) +""" +struct UnitCell{TA} + tensors::Dict{Tuple{Int,Int}, TA} + lvecs::NTuple{2,SVector{2,Int}} + m::SArray{Tuple{2,2},Float64,2,4} + UnitCell(ts, lvecs) = new{eltype(values(ts))}(ts,lvecs, inv(hcat(lvecs...))) +end + +Base.eltype(::UnitCell{TA}) where TA = TA + +Base.similar(uc::UnitCell) = UnitCell(typeof(uc.tensors)(), uc.lvecs) + + +""" + shiftcoordinates(v, m, (l1,l2)) +move from cartesian coordinates `v` to unitcell coordinates described by +lattice vectors `l1`,`l2`, transformation matrix `m`. 
+""" +function shiftcoordinates(v::SVector{2}, m, (l1,l2)) + s1, s2 = round.(Int,m * v, RoundDown) + x, y = v - s1*l1 - s2*l2 + one.(v) + return x,y +end + +function Base.getindex(uc::UnitCell, x::T, y::T) where T <: Integer + @unpack m, lvecs, tensors = uc + x, y = shiftcoordinates(SVector{2,T}(x,y), m, lvecs) + return tensors[(x,y)] +end + +function Base.setindex!(uc::UnitCell, A, x::T, y::T) where T <: Integer + @unpack m, lvecs, tensors = uc + x, y = shiftcoordinates(SVector{2,T}(x,y), m, lvecs) + tensors[(x,y)] = A + return uc +end diff --git a/src/variationalipeps.jl b/src/variationalipeps.jl new file mode 100644 index 0000000..4158283 --- /dev/null +++ b/src/variationalipeps.jl @@ -0,0 +1,78 @@ +using Optim, LineSearches +using LinearAlgebra: I, norm + +const σx = [0 1; 1 0] +const σy = [0 -1im; 1im 0] +const σz = [1 0; 0 -1] +const id2 = [1 0; 0 1] + +function symmetrize(x::AbstractArray{<:Any,5}) + x += permutedims(x, (1,4,3,2,5)) # left-right + x += permutedims(x, (3,2,1,4,5)) # up-down + x += permutedims(x, (2,1,4,3,5)) # diagonal + x += permutedims(x, (4,3,2,1,5)) # rotation + return x / norm(x) +end + +function issym(x::AbstractArray{<:Any,5}) + x ≈ permutedims(x, (2,3,4,1,5)) || return false + x ≈ permutedims(x, (3,4,1,2,5)) || return false + x ≈ permutedims(x, (4,1,2,3,5)) || return false + return true +end + +function diaglocalhamiltonian(diag::Vector) + n = length(diag) + h = einsum("i -> ii", (diag,)) + id = Matrix(I,n,n) + reshape(h,n,n,1,1) .* reshape(id,1,1,n,n) .+ reshape(h,1,1,n,n) .* reshape(id,n,n,1,1) +end + +function tfisinghamiltonian(hx::Float64 = 1.0) + -2 * einsum("ij,kl -> ijkl", (σz,σz)) - + hx/2 * einsum("ij,kl -> ijkl", (σx, id2)) - + hx/2 * einsum("ij,kl -> ijkl", (id2, σx)) +end + +function heisenberghamiltonian(;Jz::Float64 = 1.0, Jx::Float64 = 1.0, Jy::Float64 = 1.0) + h = Jz * einsum("ij,kl -> ijkl", (σz,σz)) - + Jx * einsum("ij,kl -> ijkl", (σx, σx)) - + Jy * einsum("ij,kl -> ijkl", (σy, σy)) + h = einsum("ijcd,kc,ld -> ijkl", (h,σx,σx')) + real(h ./ 2) +end + +function energy(h, tin, χ, tol, maxit) + tin = symmetrize(tin) + d = size(tin,1) + ap = einsum("abcdx,ijkly -> aibjckdlxy", (tin, conj(tin))) + ap = reshape(ap, d^2, d^2, d^2, d^2, size(tin,5), size(tin,5)) + a = einsum("ijklaa -> ijkl", (ap,)) + c, t = ctmrg(a, χ, tol, maxit) + e = expectationvalue(h, ap, (c,t)) + + return e +end + +function expectationvalue(h, a, (c,t)) + # l = einsum("ab,ica,bde,cjfdlm,eg,gfk -> ijklm",(c,t,t,a,c,t)) + a /= norm(a) + ct = einsum("ij,aki -> jak",(c,t)) + ctt = einsum("jak,jle -> akle",(ct,t)) + ctta = einsum("akle,kbfldm -> abfedm",(ctt,a)) + cttac = einsum("abfedm,eg -> abfgdm",(ctta,c)) + cttact = einsum("abfgdm, gfc -> abcdm",(cttac,t)) + l = cttact + e = einsum("abcij,abckl,ijkl -> ", (l,l,h))[] + n = einsum("ijkaa,ijkbb -> ", (l,l))[] + return e/n +end + +function optimiseipeps(t, h, χ, tol, maxit; + optimargs = (), + optimmethod = LBFGS(m = 20)) + let energy = x -> real(energy(h, x, χ, tol, maxit)) + res = optimize(energy, + Δ -> Zygote.gradient(energy,Δ)[1], t, optimmethod, inplace = false, optimargs...) 
+ end +end diff --git a/test.jl b/test.jl new file mode 100644 index 0000000..8c56baf --- /dev/null +++ b/test.jl @@ -0,0 +1 @@ +uc = UnitCell(Dict((1,1) => rand(2,2,2,2)), (SVector(1,0), SVector(0,1))) diff --git a/test/ctmrg.jl b/test/ctmrg.jl index 78a2fbc..78ca265 100644 --- a/test/ctmrg.jl +++ b/test/ctmrg.jl @@ -1,11 +1,26 @@ using TensorNetworkAD +using TensorNetworkAD: magnetisationofβ, magofβ using Test, Random using Zygote + @testset "ctmrg" begin - Random.seed!(2) - χ = 5 - niter = 5 - # $ python 2_variational_iPEPS/variational.py -model TFIM -D 3 -chi 10 -Niter 10 -Nepochs 10 - @test_broken isapprox(ctmrg(:TFIM; nepochs=10, χ=10, D=3, niter=10).E, -2.1256619, atol=1e-5) -end + Random.seed!(1) + @isdefined(pmobj) && next!(pmobj) + @test isapprox(magnetisationofβ(0,2), magofβ(0), atol=1e-6) + @isdefined(pmobj) && next!(pmobj) + @test magnetisationofβ(1,2) ≈ magofβ(1) + @isdefined(pmobj) && next!(pmobj) + @test isapprox(magnetisationofβ(0.2,10), magofβ(0.2), atol = 1e-4) + @isdefined(pmobj) && next!(pmobj) + @test isapprox(magnetisationofβ(0.4,10), magofβ(0.4), atol = 1e-3) + @isdefined(pmobj) && next!(pmobj) + @test magnetisationofβ(0.6,4) ≈ magofβ(0.6) + @isdefined(pmobj) && next!(pmobj) + @test magnetisationofβ(0.8,2) ≈ magofβ(0.8) + + foo = x -> magnetisationofβ(x,2) + @isdefined(pmobj) && next!(pmobj) + @test isapprox(Zygote.gradient(foo,0.5)[1], num_grad(foo,0.5), atol = 1e-2) + +end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 18ae187..c4eb6c8 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,7 +1,14 @@ +using ProgressMeter +pmobj = Progress(29, ) using TensorNetworkAD using Test +using Random + +Random.seed!(4) @testset "TensorNetworkAD.jl" begin include("trg.jl") include("ctmrg.jl") + include("variationalipeps.jl") end +println() \ No newline at end of file diff --git a/test/trg.jl b/test/trg.jl index 9d98dfe..0e8c867 100644 --- a/test/trg.jl +++ b/test/trg.jl @@ -1,15 +1,28 @@ using TensorNetworkAD using Test using Zygote +using TensorNetworkAD: isingtensor @testset "trg" begin - χ = 5 - niter = 5 - foo = x -> trg(x, χ, niter) - # the pytorch result with tensorgrad - # https://github.com/wangleiphy/tensorgrad - # clone this repo and type - # $ python 1_ising_TRG/ising.py -chi 5 -Niter 5 - @test foo(0.4)/2^niter ≈ 0.8919788686747141 - @test num_grad(foo, 0.4, δ=1e-6) ≈ Zygote.gradient(foo, 0.4)[1] + @testset "real" begin + χ, niter = 5, 5 + foo = β -> trg(isingtensor(β), χ, niter) + # the pytorch result with tensorgrad + # https://github.com/wangleiphy/tensorgrad + # clone this repo and type + # $ python 1_ising_TRG/ising.py -chi 5 -Niter 5 + @isdefined(pmobj) && next!(pmobj) + @test foo(0.4) ≈ 0.8919788686747141 + @isdefined(pmobj) && next!(pmobj) + @test num_grad(foo, 0.4, δ=1e-6) ≈ Zygote.gradient(foo, 0.4)[1] + end + @testset "complex" begin + β, χ, niter = 0.4, 12, 3 + @isdefined(pmobj) && next!(pmobj) + @test trg(isingtensor(β), χ, niter) ≈ + real(trg(isingtensor(β) .+ 0im, χ, niter)) + @isdefined(pmobj) && next!(pmobj) + @test Zygote.gradient(β -> trg(isingtensor(β), χ, niter), 0.4)[1] ≈ + real(Zygote.gradient(β -> real(trg(isingtensor(β) .+ 0im, χ, niter)), 0.4)[1]) + end end diff --git a/test/variationalipeps.jl b/test/variationalipeps.jl new file mode 100644 index 0000000..1027688 --- /dev/null +++ b/test/variationalipeps.jl @@ -0,0 +1,168 @@ +using Test +using TensorNetworkAD +using TensorNetworkAD: diaglocalhamiltonian, energy, expectationvalue, optimiseipeps, + tfisinghamiltonian, heisenberghamiltonian, + symmetrize,
num_grad +using OMEinsum, Zygote, Random +using LinearAlgebra: svd, norm +using Optim + +@testset "variationalipeps" begin + @testset "non-interacting" begin + h = diaglocalhamiltonian([1,-1]) + as = (rand(3,3,3,3,2) for _ in 1:100) + @isdefined(pmobj) && next!(pmobj) + @test all(a -> -1 < energy(h,a,5,0,10)/2 < 1, as) + + h = diaglocalhamiltonian([1,-1]) + a = zeros(2,2,2,2,2) .+ 1e-12 * randn(2,2,2,2,2) + a[1,1,1,1,2] = randn() + @isdefined(pmobj) && next!(pmobj) + @test energy(h,a,4,1e-12,100)/2 ≈ -1 + + a = zeros(2,2,2,2,2) .+ 1e-12 * randn(2,2,2,2,2) + a[1,1,1,1,1] = randn() + @isdefined(pmobj) && next!(pmobj) + @test energy(h,a,10,0,300)/2 ≈ 1 + + a = zeros(2,2,2,2,2) .+ 1e-12 * randn(2,2,2,2,2) + a[1,1,1,1,2] = a[1,1,1,1,1] = randn() + @isdefined(pmobj) && next!(pmobj) + @test abs(energy(h,a,10,0,300)) < 1e-9 + + + hdiag = [0.3,0.1,-0.43] + h = diaglocalhamiltonian(hdiag) + a = randn(2,2,2,2,3) + res = optimiseipeps(a, h, 4, 0, 100, + optimargs = (Optim.Options(f_tol=1e-6, show_trace=false),)) + e = minimum(res)/2 + @isdefined(pmobj) && next!(pmobj) + @test isapprox(e, minimum(hdiag), atol=1e-3) + end + + @testset "ising" begin + h = zeros(2,2,2,2) + h[1,1,2,2] = h[2,2,1,1] = 1 + h[2,2,2,2] = h[1,1,1,1] = -1 + a = randn(2,2,2,2,2) + res = optimiseipeps(a, h, 4, 0, 100, + optimargs = (Optim.Options(f_tol=1e-6, show_trace=false),)) + e = minimum(res) + @isdefined(pmobj) && next!(pmobj) + @test isapprox(e,-1, atol=1e-3) + + h = zeros(2,2,2,2) + h[1,1,2,2] = h[2,2,1,1] = 1 + h[2,2,2,2] = h[1,1,1,1] = -1 + randu, s, = svd(randn(2,2)) + h = einsum("abcd,ai,bj,ck,dl -> ijkl", (h,randu,randu',randu,randu')) + a = randn(2,2,2,2,2) + res = optimiseipeps(a, h, 6, 0, 200, + optimargs = (Optim.Options(f_tol=1e-6, show_trace=false),)) + e = minimum(res) + @isdefined(pmobj) && next!(pmobj) + @test isapprox(e,-1, atol=1e-3) + + # comparison with results from https://github.com/wangleiphy/tensorgrad + h = tfisinghamiltonian(1.0) + a = randn(2,2,2,2,2) + res = optimiseipeps(a, h, 5, 0, 100, + optimargs = (Optim.Options(f_tol=1e-6, show_trace=false),)) + e = minimum(res) + @isdefined(pmobj) && next!(pmobj) + @test isapprox(e, -2.12566, atol = 1e-3) + + h = tfisinghamiltonian(0.5) + a = randn(2,2,2,2,2) + res = optimiseipeps(a, h, 5, 0, 100, + optimargs = (Optim.Options(f_tol=1e-6, show_trace=false),)) + e = minimum(res) + @isdefined(pmobj) && next!(pmobj) + @test isapprox(e, -2.0312, atol = 1e-2) + + Random.seed!(0) + h = tfisinghamiltonian(2.0) + a = randn(2,2,2,2,2) + res = optimiseipeps(a, h, 6, 1e-9, 100, + optimargs = (Optim.Options(f_tol=1e-8, show_trace=false),)) + e = minimum(res) + @isdefined(pmobj) && next!(pmobj) + @test isapprox(e, -2.5113, atol = 1e-3) + end + + @testset "heisenberg" begin + # comparison with results from https://github.com/wangleiphy/tensorgrad + h = heisenberghamiltonian(Jz = 1.) + a = randn(2,2,2,2,2) + res = optimiseipeps(a, h, 5, 0, 100, + optimargs = (Optim.Options(f_tol=1e-6, show_trace=false),)) + e = minimum(res) + @isdefined(pmobj) && next!(pmobj) + @test isapprox(e, -0.66023, atol = 1e-3) + + # Random.seed!(0) + h = heisenberghamiltonian(Jx = 2., Jy = 2.) 
+ a = randn(2,2,2,2,2) + res = optimiseipeps(a, h, 6, 0, 100, #optimmethod = Optim.LBFGS(), + optimargs = (Optim.Options(f_tol = 1e-6, show_trace = false),)) + e = minimum(res) + @isdefined(pmobj) && next!(pmobj) + @test isapprox(e, -1.190, atol = 1e-2) + + h = heisenberghamiltonian(Jx = 0.5, Jy = 0.5, Jz = 2.0) + a = randn(2,2,2,2,2) + res = optimiseipeps(a, h, 5, 0, 100, + optimargs = (Optim.Options(f_tol = 1e-6, show_trace = false),)) + e = minimum(res) + @isdefined(pmobj) && next!(pmobj) + @test isapprox(e, -1.0208, atol = 1e-3) + end + + @testset "gradient" begin + Random.seed!(0) + h = heisenberghamiltonian() + a = symmetrize(randn(2,2,2,2,2)) + gradzygote = first(Zygote.gradient(a) do x + energy(h,x,4,0,100) + end) + gradnum = num_grad(a, δ=1e-3) do x + energy(h,x,4,0,100) + end + + @isdefined(pmobj) && next!(pmobj) + @test isapprox(gradzygote, gradnum, atol=1e-3) + end + + @testset "complex" begin + h = heisenberghamiltonian() + a = symmetrize(randn(2,2,2,2,2)) + @isdefined(pmobj) && next!(pmobj) + @test energy(h,a,4,1e-12,100) ≈ energy(h,a .+ 0im,4,1e-12,100) + ϕ = exp(1im * rand()* 2π) + @isdefined(pmobj) && next!(pmobj) + @test energy(h,a .* ϕ,4,1e-12,100) ≈ energy(h,a,4,1e-12,100) + + gradzygote = first(Zygote.gradient(a) do x + real(energy(h,x,4,1e-12,100)) + end) + + @isdefined(pmobj) && next!(pmobj) + @test gradzygote ≈ first(Zygote.gradient(a .+ 0im) do x + real(energy(h,x,4,1e-12,100)) + end) + + # real + h = heisenberghamiltonian() + a = symmetrize(randn(2,2,2,2,2)) + res1 = optimiseipeps(a, h, 10, 1e-12, 20, + optimargs = (Optim.Options(f_tol=1e-6, store_trace = true, show_trace=false),)); + + # complex + a = symmetrize(randn(2,2,2,2,2) .+ randn(2,2,2,2,2) .* 1im) + res2 = optimiseipeps(a, h, 20, 1e-12, 50, + optimargs = (Optim.Options(f_tol=1e-6,store_trace = true, show_trace=false),)); + @isdefined(pmobj) && next!(pmobj) + @test isapprox(minimum(res1), minimum(res2), atol = 1e-5) + end +end
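
Usage sketch for the reworked `trg` entry point (Julia; it mirrors test/trg.jl above — the χ/niter values and the reference number are taken from that test):

    using TensorNetworkAD, Zygote
    using TensorNetworkAD: isingtensor

    β, χ, niter = 0.4, 5, 5
    f = β -> trg(isingtensor(β), χ, niter)

    f(β)                      # ≈ 0.8919788686747141, the tensorgrad reference value checked in test/trg.jl
    Zygote.gradient(f, β)[1]  # reverse-mode gradient through TRG, compared against num_grad(f, β, δ=1e-6) in the test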
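Usage sketch for the new `ctmrg` routine and the Ising helpers from src/exampletensors.jl (pieced together from test/ctmrg.jl and the helper bodies; `magnetisationofβ` and `magofβ` are not exported, hence the explicit import, and β = 0.5 is just an illustrative choice above the critical point):

    using TensorNetworkAD
    using TensorNetworkAD: magnetisationofβ, magofβ

    β, χ = 0.5, 10
    c, t = ctmrg(isingtensor(β), χ, 1e-6, 100)   # converged corner (c) and edge (t) environment tensors

    m = magnetisationofβ(β, χ)                   # magnetisation computed from the CTMRG environment
    m_exact = magofβ(β)                          # exact Onsager magnetisation, used for comparison in the tests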
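Usage sketch for the variational iPEPS optimisation added in src/variationalipeps.jl (it mirrors the Heisenberg case in test/variationalipeps.jl, including the tensorgrad reference energy; the seed is only for reproducibility):

    using TensorNetworkAD, Optim, Random
    Random.seed!(0)

    h = heisenberghamiltonian(Jz = 1.0)   # two-site Heisenberg gate, exported by this patch
    a = randn(2, 2, 2, 2, 2)              # random iPEPS tensor: four virtual legs of dimension 2, one physical leg

    # χ = 5 environment dimension, CTMRG tolerance 0, at most 100 CTMRG iterations per energy evaluation
    res = optimiseipeps(a, h, 5, 0, 100,
                        optimargs = (Optim.Options(f_tol = 1e-6, show_trace = false),))
    e = minimum(res)                      # ≈ -0.66023 per bond in the corresponding test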