@@ -552,88 +552,115 @@ def resample(self):
 
     def bayes_risk(self, expparams):
         r"""
-        Calculates the Bayes risk for a hypothetical experiment, assuming the
+        Calculates the Bayes risk for hypothetical experiments, assuming the
         quadratic loss function defined by the current model's scale matrix
         (see :attr:`qinfer.abstract_model.Simulatable.Q`).
 
-        :param expparams: The experiment at which to compute the Bayes risk.
+        :param expparams: The experiments at which to compute the risk.
         :type expparams: :class:`~numpy.ndarray` of dtype given by the current
             model's :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype` property,
             and of shape ``(1,)``
 
-        :return float: The Bayes risk for the current posterior distribution
-            of the hypothetical experiment ``expparams``.
+        :return np.ndarray: The Bayes risk for the current posterior distribution
+            at each hypothetical experiment in ``expparams``, and therefore
+            has shape ``(expparams.size,)``.
         """
-        # This subroutine computes the bayes risk for a hypothetical experiment
-        # defined by expparams.
-
-        # Assume expparams is a single experiment
-
-        # expparams =
-        # Q = np array(Nmodelparams), which contains the diagonal part of the
-        #     rescaling matrix. Non-diagonal could also be considered, but
-        #     for the moment this is not implemented.
-        nout = self.model.n_outcomes(expparams) # This is a vector so this won't work
-        w, N = self.hypothetical_update(np.arange(nout), expparams, return_normalization=True)
-        w = w[:, 0, :] # Fix w.shape == (n_outcomes, n_particles).
-        N = N[:, :, 0] # Fix N.shape == (n_outcomes, n_particles).
-
-        xs = self.particle_locations.transpose([1, 0]) # shape (n_mp, n_particles).
-
-        # In the following, we will use the subscript convention that
-        # "o" refers to an outcome, "p" to a particle, and
-        # "i" to a model parameter.
-        # Thus, mu[o,i] is the sum over all particles of w[o,p] * x[i,p].
-
-        mu = np.transpose(np.tensordot(w, xs, axes=(1, 1)))
-        var = (
-            # This sum is a reduction over the particle index and thus
-            # represents an expectation value over the diagonal of the
-            # outer product $x . x^T$.
-
-            np.transpose(np.tensordot(w, xs ** 2, axes=(1, 1)))
-            # We finish by subtracting from the above expectation value
-            # the diagonal of the outer product $mu . mu^T$.
-            - mu ** 2).T
 
+        # for models whose outcome number changes with experiment, we
+        # take the easy way out and for-loop over experiments
+        n_eps = expparams.size
+        if n_eps > 1 and not self.model.is_n_outcomes_constant:
+            risk = np.empty(n_eps)
+            for idx in range(n_eps):
+                risk[idx] = self.bayes_risk(expparams[idx, np.newaxis])
+            return risk
+
+        # outcomes for the first experiment
+        os = self.model.domain(expparams[0, np.newaxis])[0].values
+
+        # compute the hypothetical weights, likelihoods and normalizations for
+        # every possible outcome and expparam
+        # the likelihood over outcomes should sum to 1, so don't compute for last outcome
+        w_hyp, L, N = self.hypothetical_update(
+            os[:-1],
+            expparams,
+            return_normalization=True,
+            return_likelihood=True
+        )
+        w_hyp_last_outcome = (1 - L.sum(axis=0)) * self.particle_weights[np.newaxis, :]
+        N = np.concatenate([N[:, :, 0], np.sum(w_hyp_last_outcome[np.newaxis, :, :], axis=2)], axis=0)
+        w_hyp_last_outcome = w_hyp_last_outcome / N[-1, :, np.newaxis]
+        w_hyp = np.concatenate([w_hyp, w_hyp_last_outcome[np.newaxis, :, :]], axis=0)
+        # w_hyp.shape == (n_out, n_eps, n_particles)
+        # N.shape == (n_out, n_eps)
+
+        # compute the hypothetical means and variances given outcomes and expparams
+        # mu_hyp.shape == (n_out, n_eps, n_models)
+        # var_hyp.shape == (n_out, n_eps)
+        mu_hyp = np.dot(w_hyp, self.particle_locations)
+        var_hyp = np.sum(
+            w_hyp *
+            np.sum(self.model.Q * (
+                self.particle_locations[np.newaxis, np.newaxis, :, :] -
+                mu_hyp[:, :, np.newaxis, :]
+            ) ** 2, axis=3),
+            axis=2
+        )
 
-        rescale_var = np.sum(self.model.Q * var, axis=1)
-        # Q has shape (n_mp,), therefore rescale_var has shape (n_outcomes,).
-        tot_norm = np.sum(N, axis=1)
-        return np.dot(tot_norm.T, rescale_var)
+        # the risk of a given expparam can be calculated as the mean posterior
+        # variance weighted over all possible outcomes
+        return np.sum(N * var_hyp, axis=0)
 
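
As a sanity check on the vectorized computation above, the risk for a single experiment can be reproduced by hand. The following is a minimal standalone numpy sketch, not qinfer code: the two-outcome likelihood and all names (``w``, ``locs``, ``L``, ``Q``) are illustrative assumptions.

import numpy as np

rng = np.random.default_rng(0)
n_particles, n_mp = 1000, 2
w = np.full(n_particles, 1.0 / n_particles)    # current particle weights
locs = rng.uniform(0, 1, (n_particles, n_mp))  # particle locations
Q = np.ones(n_mp)                              # diagonal of the scale matrix

# Toy two-outcome likelihood Pr(outcome | particle); each column sums to 1.
L = np.stack([locs[:, 0], 1 - locs[:, 0]])     # shape (n_out, n_particles)

w_hyp = L * w                                  # unnormalized hypothetical posteriors
N = w_hyp.sum(axis=1)                          # Pr(outcome), shape (n_out,)
w_hyp /= N[:, np.newaxis]

# Q-rescaled posterior variance for each hypothetical outcome, then
# the risk as its expectation over outcomes.
mu = w_hyp @ locs                              # posterior means, shape (n_out, n_mp)
var = np.sum(w_hyp * np.sum(Q * (locs[np.newaxis] - mu[:, np.newaxis]) ** 2, axis=2), axis=1)
risk = np.sum(N * var)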
     def expected_information_gain(self, expparams):
         r"""
-        Calculates the expected information gain for a hypothetical experiment.
+        Calculates the expected information gain for each hypothetical experiment.
 
-        :param expparams: The experiment at which to compute expected
+        :param expparams: The experiments at which to compute expected
             information gain.
         :type expparams: :class:`~numpy.ndarray` of dtype given by the current
             model's :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype` property,
-            and of shape ``(1,)``
+            and of shape ``(n,)``
 
-        :return float: The Bayes risk for the current posterior distribution
-            of the hypothetical experiment ``expparams``.
+        :return np.ndarray: The expected information gain for each
+            hypothetical experiment in ``expparams``.
         """
-
-        nout = self.model.n_outcomes(expparams)
-        w, N = self.hypothetical_update(np.arange(nout), expparams, return_normalization=True)
-        w = w[:, 0, :] # Fix w.shape == (n_outcomes, n_particles).
-        N = N[:, :, 0] # Fix N.shape == (n_outcomes, n_particles).
-
         # This is a special case of the KL divergence estimator (see below),
         # in which the other distribution is guaranteed to share support.
-        #
-        # KLD[idx_outcome] = Sum over particles(self * log(self / other[idx_outcome])
-        # Est. KLD = E[KLD[idx_outcome] | outcomes].
+
+        # for models whose outcome number changes with experiment, we
+        # take the easy way out and for-loop over experiments
+        n_eps = expparams.size
+        if n_eps > 1 and not self.model.is_n_outcomes_constant:
+            risk = np.empty(n_eps)
+            for idx in range(n_eps):
+                risk[idx] = self.expected_information_gain(expparams[idx, np.newaxis])
+            return risk
+
+        # number of outcomes for the first experiment
+        os = self.model.domain(expparams[0, np.newaxis])[0].values
+
+        # compute the hypothetical weights, likelihoods and normalizations for
+        # every possible outcome and expparam
+        # the likelihood over outcomes should sum to 1, so don't compute for last outcome
+        w_hyp, L, N = self.hypothetical_update(
+            os[:-1],
+            expparams,
+            return_normalization=True,
+            return_likelihood=True
+        )
+        w_hyp_last_outcome = (1 - L.sum(axis=0)) * self.particle_weights[np.newaxis, :]
+        N = np.concatenate([N[:, :, 0], np.sum(w_hyp_last_outcome[np.newaxis, :, :], axis=2)], axis=0)
+        w_hyp_last_outcome = w_hyp_last_outcome / N[-1, :, np.newaxis]
+        w_hyp = np.concatenate([w_hyp, w_hyp_last_outcome[np.newaxis, :, :]], axis=0)
+        # w_hyp.shape == (n_out, n_eps, n_particles)
+        # N.shape == (n_out, n_eps)
 
-        KLD = np.sum(
-            w * np.log(w / self.particle_weights),
-            axis=1 # Sum over particles.
-        )
+        # compute the Kullback-Leibler divergence for every experiment and possible outcome
+        # KLD.shape == (n_out, n_eps)
+        KLD = np.sum(w_hyp * np.log(w_hyp / self.particle_weights), axis=2)
 
-        tot_norm = np.sum(N, axis=1)
-        return np.dot(tot_norm, KLD)
+        # return the expected KLD (i.e. expected info gain) for every experiment
+        return np.sum(N * KLD, axis=0)
 
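
The information-gain path mirrors the risk path: the posterior variance is replaced by the Kullback-Leibler divergence from the current weights to each hypothetical posterior, and the result is again averaged over outcomes. A matching standalone numpy sketch, with the same caveat that the toy likelihood and names are assumptions rather than qinfer API:

import numpy as np

rng = np.random.default_rng(1)
n_particles = 1000
w = np.full(n_particles, 1.0 / n_particles)    # current particle weights
p = rng.uniform(0.01, 0.99, n_particles)       # Pr(outcome 0 | particle)
L = np.stack([p, 1 - p])                       # shape (n_out, n_particles)

w_hyp = L * w                                  # unnormalized hypothetical posteriors
N = w_hyp.sum(axis=1)                          # Pr(outcome), shape (n_out,)
w_hyp /= N[:, np.newaxis]

# One KL divergence per hypothetical outcome, then its expectation,
# which estimates the information gained by performing the experiment.
KLD = np.sum(w_hyp * np.log(w_hyp / w), axis=1)
info_gain = np.sum(N * KLD)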
 
     ## MISC METHODS ###########################################################
@@ -1212,4 +1239,3 @@ def update(self, outcome, expparams,check_for_resample=True):
 
         # We now can update as normal.
         SMCUpdater.update(self, outcome, expparams, check_for_resample=check_for_resample)
-
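
Taken together, the two vectorized methods and ``update`` support the usual design-measure-update loop. The sketch below is hypothetical usage, not taken from qinfer's examples: ``updater``, ``candidates`` (an array with the model's ``expparams_dtype``), and ``perform_experiment`` are assumed to already exist.

import numpy as np

for _ in range(100):                                     # number of adaptive steps is arbitrary
    ig = updater.expected_information_gain(candidates)   # shape (candidates.size,)
    best = candidates[np.argmax(ig), np.newaxis]         # most informative candidate
    outcome = perform_experiment(best)                   # user-supplied measurement
    updater.update(outcome, best)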