From 525a2b57ea00c384e16f42c16619ca5ad4206ffe Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Tue, 26 Sep 2023 04:18:05 +0800 Subject: [PATCH 1/6] Create CUDA and GenericTensorNetworks extensions --- Project.toml | 16 +++++++++++----- src/cuda.jl => ext/TensorInferenceCUDAExt.jl | 9 ++++++++- .../TensorInferenceGTNExt.jl | 12 +++++++----- src/TensorInference.jl | 10 ++++------ src/mar.jl | 2 +- src/utils.jl | 2 ++ 6 files changed, 33 insertions(+), 18 deletions(-) rename src/cuda.jl => ext/TensorInferenceCUDAExt.jl (71%) rename src/generictensornetworks.jl => ext/TensorInferenceGTNExt.jl (90%) diff --git a/Project.toml b/Project.toml index 33aebd5..594d2cf 100644 --- a/Project.toml +++ b/Project.toml @@ -5,9 +5,7 @@ version = "0.4.0" [deps] Artifacts = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" -CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" -GenericTensorNetworks = "3521c873-ad32-4bb4-b63d-f4f178f42b49" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" OMEinsum = "ebe7aa44-baf0-506c-a96f-8464559b3922" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" @@ -16,13 +14,21 @@ Requires = "ae029012-a4dd-5104-9daa-d747884805df" StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" TropicalNumbers = "b3a74e9c-7526-4576-a4eb-79c0d4c32334" +[weakdeps] +CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" +GenericTensorNetworks = "3521c873-ad32-4bb4-b63d-f4f178f42b49" + +[extensions] +TensorInferenceCUDAExt = "CUDA" +TensorInferenceGTNExt = "GenericTensorNetworks" + [compat] -CUDA = "4" +CUDA = "4, 5" DocStringExtensions = "0.8.6, 0.9" GenericTensorNetworks = "1" OMEinsum = "0.7" PrecompileTools = "1" Requires = "1" StatsBase = "0.34" -TropicalNumbers = "0.5.4" -julia = "1.3" +TropicalNumbers = "0.5.4, 0.6" +julia = "1.9" diff --git a/src/cuda.jl b/ext/TensorInferenceCUDAExt.jl similarity index 71% rename from src/cuda.jl rename to ext/TensorInferenceCUDAExt.jl index d2c6fc1..40204fc 100644 --- a/src/cuda.jl +++ b/ext/TensorInferenceCUDAExt.jl @@ -1,4 +1,7 @@ -using .CUDA: CuArray +module TensorInferenceCUDAExt +using CUDA: CuArray +import CUDA +import TensorInference: match_arraytype, keep_only!, onehot_like, togpu function onehot_like(A::CuArray, j) mask = zero(A) @@ -15,3 +18,7 @@ function keep_only!(x::CuArray{T}, j) where T CUDA.@allowscalar x[j] = hotvalue return x end + +togpu(x::AbstractArray) = CuArray(x) + +end \ No newline at end of file diff --git a/src/generictensornetworks.jl b/ext/TensorInferenceGTNExt.jl similarity index 90% rename from src/generictensornetworks.jl rename to ext/TensorInferenceGTNExt.jl index e8a9f6f..c141eba 100644 --- a/src/generictensornetworks.jl +++ b/ext/TensorInferenceGTNExt.jl @@ -1,7 +1,8 @@ -using .GenericTensorNetworks: generate_tensors, GraphProblem, flavors, labels - -# update models -export update_temperature +module TensorInferenceGTNExt +using TensorInference, TensorInference.OMEinsum +using TensorInference: TYPEDSIGNATURES, Factor +import TensorInference: update_temperature +using GenericTensorNetworks: generate_tensors, GraphProblem, flavors, labels """ $TYPEDSIGNATURES @@ -64,4 +65,5 @@ It is about one or two hours of works. If you need it, please file an issue to l end @info "`TensorInference` loaded `GenericTensorNetworks` extension successfully, -`TensorNetworkModel` and `MMAPModel` can be used for converting a `GraphProblem` to a probabilistic model now." \ No newline at end of file +`TensorNetworkModel` and `MMAPModel` can be used for converting a `GraphProblem` to a probabilistic model now." 
+end \ No newline at end of file diff --git a/src/TensorInference.jl b/src/TensorInference.jl index 5b02146..00f43ac 100644 --- a/src/TensorInference.jl +++ b/src/TensorInference.jl @@ -34,6 +34,10 @@ export sample # MMAP export MMAPModel +# for GenericTensorNetworks +export update_temperature +function update_temperature end + include("Core.jl") include("RescaledArray.jl") include("utils.jl") @@ -42,12 +46,6 @@ include("map.jl") include("mmap.jl") include("sampling.jl") -using Requires -function __init__() - @require CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" include("cuda.jl") - @require GenericTensorNetworks = "3521c873-ad32-4bb4-b63d-f4f178f42b49" include("generictensornetworks.jl") -end - # import PrecompileTools # PrecompileTools.@setup_workload begin # # Putting some things in `@setup_workload` instead of `@compile_workload` can reduce the size of the diff --git a/src/mar.jl b/src/mar.jl index 94cc5ee..49d0225 100644 --- a/src/mar.jl +++ b/src/mar.jl @@ -7,7 +7,7 @@ function adapt_tensors(code, tensors, evidence; usecuda, rescale) map(tensors, ixs) do t, ix dims = map(ixi -> ixi ∉ keys(evidence) ? Colon() : ((evidence[ixi] + 1):(evidence[ixi] + 1)), ix) t2 = t[dims...] - t3 = usecuda ? CuArray(t2) : t2 + t3 = usecuda ? togpu(t2) : t2 rescale ? rescale_array(t3) : t3 end end diff --git a/src/utils.jl b/src/utils.jl index 4ead589..3b4a7c8 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -305,3 +305,5 @@ function get_artifact_path(artifact_name::String) artifact_hash = Pkg.Artifacts.artifact_hash(artifact_name, artifact_toml) return Pkg.Artifacts.artifact_path(artifact_hash) end + +togpu(x) = error("You must import CUDA with `using CUDA` before using GPU!") \ No newline at end of file From 21d966c5719b43f018b44e11f7d1698df63fc75e Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Tue, 26 Sep 2023 04:20:59 +0800 Subject: [PATCH 2/6] update project version --- Project.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/Project.toml b/Project.toml index 594d2cf..5a7b714 100644 --- a/Project.toml +++ b/Project.toml @@ -10,7 +10,6 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" OMEinsum = "ebe7aa44-baf0-506c-a96f-8464559b3922" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" -Requires = "ae029012-a4dd-5104-9daa-d747884805df" StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" TropicalNumbers = "b3a74e9c-7526-4576-a4eb-79c0d4c32334" @@ -28,7 +27,6 @@ DocStringExtensions = "0.8.6, 0.9" GenericTensorNetworks = "1" OMEinsum = "0.7" PrecompileTools = "1" -Requires = "1" StatsBase = "0.34" TropicalNumbers = "0.5.4, 0.6" julia = "1.9" From 9cff2973d370402c32a68b342437505421f7cb0a Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Tue, 26 Sep 2023 11:17:49 +0800 Subject: [PATCH 3/6] fix document --- ext/TensorInferenceGTNExt.jl | 14 +------------- src/TensorInference.jl | 14 +++++++++++++- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/ext/TensorInferenceGTNExt.jl b/ext/TensorInferenceGTNExt.jl index c141eba..f0ff184 100644 --- a/ext/TensorInferenceGTNExt.jl +++ b/ext/TensorInferenceGTNExt.jl @@ -1,7 +1,6 @@ module TensorInferenceGTNExt using TensorInference, TensorInference.OMEinsum using TensorInference: TYPEDSIGNATURES, Factor -import TensorInference: update_temperature using GenericTensorNetworks: generate_tensors, GraphProblem, flavors, labels """ @@ -25,18 +24,7 @@ function TensorInference.TensorNetworkModel(problem::GraphProblem, β::Real; evi return TensorNetworkModel(lbs, fill(nflavors, length(lbs)), factors; 
openvars=iy, evidence, optimizer, simplifier, mars) end -""" -$TYPEDSIGNATURES - -Update the temperature of a tensor network model. -The program will regenerate tensors from the problem, without repeated optimizing the contraction order. - -### Arguments -- `tnet` is the [`TensorNetworkModel`](@ref) instance. -- `problem` is the target constraint satisfiability problem. -- `β` is the inverse temperature. -""" -function update_temperature(tnet::TensorNetworkModel, problem::GraphProblem, β::Real) +function TensorInference.update_temperature(tnet::TensorNetworkModel, problem::GraphProblem, β::Real) tensors = generate_tensors(exp(β), problem) alltensors = [tnet.tensors[1:end-length(tensors)]..., tensors...] return TensorNetworkModel(tnet.vars, tnet.code, alltensors, tnet.evidence, tnet.mars) diff --git a/src/TensorInference.jl b/src/TensorInference.jl index 00f43ac..7614032 100644 --- a/src/TensorInference.jl +++ b/src/TensorInference.jl @@ -36,7 +36,6 @@ export MMAPModel # for GenericTensorNetworks export update_temperature -function update_temperature end include("Core.jl") include("RescaledArray.jl") @@ -55,4 +54,17 @@ include("sampling.jl") # end # end +""" +$TYPEDSIGNATURES + +Update the temperature of a tensor network model. +The program will regenerate tensors from the problem, without repeated optimizing the contraction order. + +### Arguments +- `tnet` is the [`TensorNetworkModel`](@ref) instance. +- `problem` is the target constraint satisfiability problem. +- `β` is the inverse temperature. +""" +function update_temperature end + end # module From 66845a90564f82f53cff74bc83354773535165ca Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Tue, 25 Mar 2025 10:58:46 +0800 Subject: [PATCH 4/6] fix documentation --- docs/Project.toml | 3 ++- examples/hard-core-lattice-gas/main.jl | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/docs/Project.toml b/docs/Project.toml index 1da673f..20f7833 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -1,8 +1,9 @@ [deps] Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -GenericTensorNetworks = "3521c873-ad32-4bb4-b63d-f4f178f42b49" +Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306" LiveServer = "16fef848-5104-11e9-1b77-fb7a48bbb589" +LuxorGraphPlot = "1f49bdf2-22a7-4bc4-978b-948dc219fbbc" ProblemReductions = "899c297d-f7d2-4ebf-8815-a35996def416" TensorInference = "c2297e78-99bd-40ad-871d-f50e56b81012" TikzPictures = "37f6aa50-8035-52d0-81c2-5a1d08754b2d" diff --git a/examples/hard-core-lattice-gas/main.jl b/examples/hard-core-lattice-gas/main.jl index 2b82f01..575d021 100644 --- a/examples/hard-core-lattice-gas/main.jl +++ b/examples/hard-core-lattice-gas/main.jl @@ -20,11 +20,12 @@ sites = vec([50 .* (a .* i .+ b .* j) for i=1:Na, j=1:Nb]) # There exists blockade interactions between hard-core particles. # We connect two lattice sites within blockade radius by an edge. # Two ends of an edge can not both be occupied by particles. 
-blockade_radius = 55 -using GenericTensorNetworks: show_graph, unit_disk_graph -using GenericTensorNetworks.Graphs: edges, nv -graph = unit_disk_graph(vec(sites), blockade_radius) -show_graph(graph, sites; texts=fill("", length(sites))) +blockade_radius = 55.0 +using LuxorGraphPlot: show_graph, GraphDisplayConfig +using Graphs: edges, nv, SimpleGraph +using TensorInference.ProblemReductions: UnitDiskGraph, IndependentSet +graph = UnitDiskGraph(vec(sites), blockade_radius) +show_graph(SimpleGraph(graph), sites; texts=fill("", length(sites))) # These constraints defines an independent set problem that characterized by the following energy based model. # Let $G = (V, E)$ be a graph, where $V$ is the set of vertices and $E$ is the set of edges. @@ -38,7 +39,6 @@ show_graph(graph, sites; texts=fill("", length(sites))) # The solution space hard-core lattice gas is equivalent to that of an independent set problem. # The independent set problem involves finding a set of vertices in a graph such that no two vertices in the set are adjacent (i.e., there is no edge connecting them). # One can create a tensor network based modeling of an independent set problem with package [`GenericTensorNetworks.jl`](https://github.com/QuEraComputing/GenericTensorNetworks.jl). -using GenericTensorNetworks problem = IndependentSet(graph) # There are plenty of discussions related to solution space properties in the `GenericTensorNetworks` [documentaion page](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/generated/IndependentSet/). @@ -59,14 +59,14 @@ partition_func[] # The marginal probabilities can be computed with the [`marginals`](@ref) function, which measures how likely a site is occupied. mars = marginals(pmodel) -show_graph(graph, sites; vertex_colors=[(b = mars[[i]][2]; (1-b, 1-b, 1-b)) for i in 1:nv(graph)], texts=fill("", nv(graph))) +show_graph(SimpleGraph(graph), sites; vertex_colors=[(b = mars[[i]][2]; (1-b, 1-b, 1-b)) for i in 1:nv(graph)], texts=fill("", nv(graph))) # The can see the sites at the corner is more likely to be occupied. # To obtain two-site correlations, one can set the variables to query marginal probabilities manually. pmodel2 = TensorNetworkModel(problem, β; mars=[[e.src, e.dst] for e in edges(graph)]) mars = marginals(pmodel2); # We show the probability that both sites on an edge are not occupied -show_graph(graph, sites; edge_colors=[(b = mars[[e.src, e.dst]][1, 1]; (1-b, 1-b, 1-b)) for e in edges(graph)], texts=fill("", nv(graph)), config=GraphDisplayConfig(; edge_line_width=5)) +show_graph(SimpleGraph(graph), sites; edge_colors=[(b = mars[[e.src, e.dst]][1, 1]; (1-b, 1-b, 1-b)) for e in edges(graph)], texts=fill("", nv(graph)), config=GraphDisplayConfig(; edge_line_width=5)) # ## The most likely configuration # The MAP and MMAP can be used to get the most likely configuration given an evidence. @@ -77,7 +77,7 @@ mars = marginals(pmodel3) logp, config = most_probable_config(pmodel3) # The log probability is 102. Let us visualize the configuration. -show_graph(graph, sites; vertex_colors=[(1-b, 1-b, 1-b) for b in config], texts=fill("", nv(graph))) +show_graph(SimpleGraph(graph), sites; vertex_colors=[(1-b, 1-b, 1-b) for b in config], texts=fill("", nv(graph))) # The number of particles is sum(config) @@ -86,7 +86,7 @@ pmodel3 = TensorNetworkModel(problem, β; evidence=Dict(1=>0)) logp2, config2 = most_probable_config(pmodel) # The log probability is 99, which is much smaller. 
-show_graph(graph, sites; vertex_colors=[(1-b, 1-b, 1-b) for b in config2], texts=fill("", nv(graph))) +show_graph(SimpleGraph(graph), sites; vertex_colors=[(1-b, 1-b, 1-b) for b in config2], texts=fill("", nv(graph))) # The number of particles is sum(config2) From 09004cf0158fc7e74b7259cd8a70a94eb15d9914 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Tue, 25 Mar 2025 11:00:45 +0800 Subject: [PATCH 5/6] prevent long vector --- examples/hard-core-lattice-gas/main.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/hard-core-lattice-gas/main.jl b/examples/hard-core-lattice-gas/main.jl index 575d021..14cc289 100644 --- a/examples/hard-core-lattice-gas/main.jl +++ b/examples/hard-core-lattice-gas/main.jl @@ -15,7 +15,7 @@ a, b = (1, 0), (0.5, 0.5*sqrt(3)) Na, Nb = 10, 10 -sites = vec([50 .* (a .* i .+ b .* j) for i=1:Na, j=1:Nb]) +sites = vec([50 .* (a .* i .+ b .* j) for i=1:Na, j=1:Nb]); # There exists blockade interactions between hard-core particles. # We connect two lattice sites within blockade radius by an edge. From 20048f05bd6d3a57483382f3e315175344ef2795 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Tue, 25 Mar 2025 11:17:48 +0800 Subject: [PATCH 6/6] fix the energy mode --- src/cspmodels.jl | 2 +- test/cspmodels.jl | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/cspmodels.jl b/src/cspmodels.jl index 9af4ef3..128f684 100644 --- a/src/cspmodels.jl +++ b/src/cspmodels.jl @@ -4,7 +4,7 @@ function generate_tensors(β::T, problem::ConstraintSatisfactionProblem) where T objs = ProblemReductions.objectives(problem) ixs = vcat([t.variables for t in cons], [t.variables for t in objs]) # generate tensors for x = e^β - x = exp(β) + x = energy_mode(problem) === LargerSizeIsBetter() ? exp(β) : exp(-β) tensors = vcat( Array{T}[reshape(map(s -> s ? one(x) : zero(x), t.specification), ntuple(i->num_flavors(problem), length(t.variables))) for t in cons], Array{T}[reshape(map(s -> x^s, t.specification), ntuple(i->num_flavors(problem), length(t.variables))) for t in objs] diff --git a/test/cspmodels.jl b/test/cspmodels.jl index fec53f1..c7b559b 100644 --- a/test/cspmodels.jl +++ b/test/cspmodels.jl @@ -1,5 +1,5 @@ using Test -using TensorInference +using TensorInference, ProblemReductions.Graphs using GenericTensorNetworks @testset "marginals" begin @@ -25,4 +25,10 @@ using GenericTensorNetworks model = MMAPModel(problem, β; queryvars=[1,4]) logp, config = most_probable_config(model) @test config == [0, 0] + + β = 1.0 + problem = SpinGlass(g, -ones(Int, ne(g)), zeros(Int, nv(g))) + model = TensorNetworkModel(problem, β; mars=[[2, 3]]) + samples = sample(model, 100) + @test sum(energy.(Ref(problem), samples))/100 <= -14 end \ No newline at end of file
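
A note on the weight convention fixed in PATCH 6/6: `generate_tensors` now maps each objective entry `s` to `exp(β*s)` when `energy_mode(problem) === LargerSizeIsBetter()` (e.g. independent-set size) and to `exp(-β*s)` otherwise (e.g. spin-glass energy), so raising the inverse temperature β always concentrates probability on the better configurations. That is why the new test expects the sampled mean energy of the ferromagnetic spin glass to be strongly negative.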
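
To make the combined effect of the series concrete, the following is a minimal end-to-end sketch against the final state of the branch. It only exercises calls that appear in the hunks above (`SpinGlass`, `TensorNetworkModel(problem, β; ...)`, `marginals`, `sample`, `energy`, `update_temperature`, `togpu`); the Petersen test graph, the applicability of `update_temperature` to a ProblemReductions problem, and the `usecuda` keyword are illustrative assumptions, not something the patches verify. Imports follow test/cspmodels.jl and assume ProblemReductions and GenericTensorNetworks are in the active environment.

using TensorInference, ProblemReductions.Graphs   # Graphs exports smallgraph, ne, nv, ...
using GenericTensorNetworks                       # same imports as test/cspmodels.jl; also activates the GTN extension

g = smallgraph(:petersen)                         # assumed test graph; any SimpleGraph works
problem = SpinGlass(g, -ones(Int, ne(g)), zeros(Int, nv(g)))  # ferromagnetic couplings, zero fields

β = 1.0                                           # inverse temperature
model = TensorNetworkModel(problem, β; mars=[[2, 3]])

mars = marginals(model)                           # marginal distribution of the queried pair (2, 3)
samples = sample(model, 100)                      # 100 configurations from the Boltzmann distribution
mean_energy = sum(energy.(Ref(problem), samples)) / 100

# Regenerate the tensors at a new inverse temperature without re-optimizing the
# contraction order. The method for ProblemReductions problems is assumed to
# mirror the GraphProblem method shown in PATCH 3/6.
model2 = update_temperature(model, problem, 2.0)

# GPU off-loading now requires loading CUDA explicitly, which activates
# TensorInferenceCUDAExt and replaces the `togpu` error fallback:
# using CUDA
# marginals(model; usecuda=true)                  # `usecuda` keyword assumed from adapt_tensors in src/mar.jl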