|
30 | 30 |
|
31 | 31 | Base.show(io::IO, ::MIME"text/plain", uai::UAIModel) = Base.show(io, uai) |
32 | 32 | function Base.show(io::IO, uai::UAIModel) |
33 | | - println(io, "UAIModel(nvars = $(uai.nvars), nfactors = $(length(uai.factors))") |
| 33 | + println(io, "UAIModel(nvars = $(uai.nvars), nfactors = $(length(uai.factors)))") |
34 | 34 | println(io, " cards : $(uai.cards)") |
35 | 35 | println(io, " factors : ") |
36 | 36 | for (k, f) in enumerate(uai.factors) |
@@ -180,7 +180,7 @@ chevidence(tn::TensorNetworkModel, evidence) = TensorNetworkModel(tn.vars, tn.co |
180 | 180 | """ |
181 | 181 | $(TYPEDSIGNATURES) |
182 | 182 |
|
183 | | -Evaluate the log probability of `config`. |
| 183 | +Evaluate the log probability (or partition function) of `config`. |
184 | 184 | """ |
185 | 185 | function log_probability(tn::TensorNetworkModel, config::Union{Dict, AbstractVector})::Real |
186 | 186 | assign = config isa AbstractVector ? Dict(zip(get_vars(tn), config)) : config |
|
190 | 190 | """ |
191 | 191 | $(TYPEDSIGNATURES) |
192 | 192 |
|
193 | | -Contract the tensor network and return a probability array with its rank specified in the contraction code `tn.code`. |
194 | | -The returned array may not be l1-normalized even if the total probability is l1-normalized, because the evidence `tn.evidence` may not be empty. |
| 193 | +Contract the tensor network and return an array of probability of evidence. |
| 194 | +Precisely speaking, the return value is the partition function, which may not be l1-normalized. |
| 195 | +
|
| 196 | +If the `openvars` of the input tensor network is empty, the array rank is zero. |
| 197 | +Otherwise, the return value corresponds to marginal probabilities. |
195 | 198 | """ |
196 | 199 | function probability(tn::TensorNetworkModel; usecuda = false, rescale = true)::AbstractArray |
197 | 200 | return tn.code(adapt_tensors(tn; usecuda, rescale)...) |
|
0 commit comments