@@ -566,10 +566,6 @@ def bayes_risk(self, expparams):
             has shape ``(expparams.size,)``
         """
 
-        # outcomes for the first experiment
-        os = self.model.domain(None).values
-        n_out = os.size
-
         # for models whose outcome number changes with experiment, we
         # take the easy way out and for-loop over experiments
         n_eps = expparams.size
@@ -579,6 +575,9 @@ def bayes_risk(self, expparams):
                 risk[idx] = self.bayes_risk(expparams[idx, np.newaxis])
             return risk
 
+        # outcomes for the first experiment
+        os = self.model.domain(expparams[0, np.newaxis])[0].values
+
         # compute the hypothetical weights, likelihoods and normalizations for
         # every possible outcome and expparam
         # the likelihood over outcomes should sum to 1, so don't compute for last outcome
@@ -628,10 +627,6 @@ def expected_information_gain(self, expparams):
         # This is a special case of the KL divergence estimator (see below),
         # in which the other distribution is guaranteed to share support.
 
-        # number of outcomes for the first experiment
-        os = self.model.domain(None).values
-        n_out = os.size
-
         # for models whose outcome number changes with experiment, we
         # take the easy way out and for-loop over experiments
         n_eps = expparams.size
@@ -641,6 +636,9 @@ def expected_information_gain(self, expparams):
                 risk[idx] = self.expected_information_gain(expparams[idx, np.newaxis])
             return risk
 
+        # number of outcomes for the first experiment
+        os = self.model.domain(expparams[0, np.newaxis])[0].values
+
         # compute the hypothetical weights, likelihoods and normalizations for
         # every possible outcome and expparam
         # the likelihood over outcomes should sum to 1, so don't compute for last outcome
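In both methods the change is the same: the outcome lookup is moved below the early-return branch that loops over experiments one at a time, and the domain is requested for the first experiment (`domain(expparams[0, np.newaxis])[0]`) instead of via `domain(None)`, which is only well defined when the outcome count is constant. The sketch below is a minimal illustration of the contract the patched lines rely on, namely that `domain(expparams)` returns one Domain object per experiment parameter; the `IntegerDomain`, `VariableOutcomeModel`, and `n_meas` field here are hypothetical stand-ins, not QInfer classes.

```python
import numpy as np

class IntegerDomain:
    """Hypothetical stand-in for a Domain whose outcomes are 0..n_outcomes-1."""
    def __init__(self, n_outcomes):
        self.values = np.arange(n_outcomes)

class VariableOutcomeModel:
    """Toy model whose number of outcomes depends on the experiment parameter."""
    is_n_outcomes_constant = False

    def domain(self, expparams):
        # One Domain per experiment; domain(None) cannot be answered here,
        # which is why the old call site had to move past the per-experiment loop.
        if expparams is None:
            raise ValueError("outcome count varies; an expparam is required")
        return [IntegerDomain(int(ep['n_meas']) + 1) for ep in expparams]

eps = np.array([(2,), (5,)], dtype=[('n_meas', int)])
model = VariableOutcomeModel()

# outcomes for the first experiment, mirroring the patched call site
os_ = model.domain(eps[0, np.newaxis])[0].values
print(os_)        # [0 1 2]
print(os_.size)   # 3 -- the separate n_out = os.size is no longer needed
```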