@@ -84,12 +84,12 @@ def sample(
         X_train: Union[pd.DataFrame, np.array],
         Z_train: np.array,
         y_train: np.array,
-        pi_train: np.array = None,
+        propensity_train: np.array = None,
         rfx_group_ids_train: np.array = None,
         rfx_basis_train: np.array = None,
         X_test: Union[pd.DataFrame, np.array] = None,
         Z_test: np.array = None,
-        pi_test: np.array = None,
+        propensity_test: np.array = None,
         rfx_group_ids_test: np.array = None,
         rfx_basis_test: np.array = None,
         num_gfr: int = 5,
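
For reference, a minimal call-site sketch of the renamed keyword arguments (not part of this diff; the class name, import path, and simulated data are assumptions):

# Hypothetical usage sketch for the renamed arguments; BCFModel and the
# stochtree import path are assumed, not taken from this diff.
import numpy as np
from stochtree import BCFModel

rng = np.random.default_rng(2024)
n, p = 500, 5
X = rng.uniform(size=(n, p))
propensity = 1.0 / (1.0 + np.exp(-(X[:, 0] - 0.5)))  # known treatment probabilities
Z = rng.binomial(1, propensity).astype(float)
y = X[:, 1] + 0.5 * Z + rng.normal(size=n)

model = BCFModel()
model.sample(
    X_train=X,
    Z_train=Z,
    y_train=y,
    propensity_train=propensity,  # formerly pi_train; omit to estimate internally
    num_gfr=5,
)
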
@@ -114,7 +114,7 @@ def sample(
             Array of (continuous or binary; univariate or multivariate) treatment assignments.
         y_train : np.array
             Outcome to be modeled by the ensemble.
-        pi_train : np.array
+        propensity_train : np.array
             Optional vector of propensity scores. If not provided, this will be estimated from the data.
         rfx_group_ids_train : np.array, optional
             Optional group labels used for an additive random effects model.
@@ -125,7 +125,7 @@ def sample(
         Z_test : np.array, optional
             Optional test set of (continuous or binary) treatment assignments.
             Must be provided if `X_test` is provided.
-        pi_test : np.array, optional
+        propensity_test : np.array, optional
             Optional test set vector of propensity scores. If not provided (but `X_test` and `Z_test` are), this will be estimated from the data.
         rfx_group_ids_test : np.array, optional
             Optional test set group labels used for an additive random effects model. We do not currently support (but plan to in the near future),
@@ -541,9 +541,9 @@ def sample(
             raise ValueError("X_train must be a pandas dataframe or numpy array")
         if not isinstance(Z_train, np.ndarray):
             raise ValueError("Z_train must be a numpy array")
-        if pi_train is not None:
-            if not isinstance(pi_train, np.ndarray):
-                raise ValueError("pi_train must be a numpy array")
+        if propensity_train is not None:
+            if not isinstance(propensity_train, np.ndarray):
+                raise ValueError("propensity_train must be a numpy array")
         if not isinstance(y_train, np.ndarray):
             raise ValueError("y_train must be a numpy array")
         if X_test is not None:
@@ -554,9 +554,9 @@ def sample(
         if Z_test is not None:
             if not isinstance(Z_test, np.ndarray):
                 raise ValueError("Z_test must be a numpy array")
-        if pi_test is not None:
-            if not isinstance(pi_test, np.ndarray):
-                raise ValueError("pi_test must be a numpy array")
+        if propensity_test is not None:
+            if not isinstance(propensity_test, np.ndarray):
+                raise ValueError("propensity_test must be a numpy array")
         if rfx_group_ids_train is not None:
             if not isinstance(rfx_group_ids_train, np.ndarray):
                 raise ValueError("rfx_group_ids_train must be a numpy array")
@@ -585,9 +585,9 @@ def sample(
         if Z_train is not None:
             if Z_train.ndim == 1:
                 Z_train = np.expand_dims(Z_train, 1)
-        if pi_train is not None:
-            if pi_train.ndim == 1:
-                pi_train = np.expand_dims(pi_train, 1)
+        if propensity_train is not None:
+            if propensity_train.ndim == 1:
+                propensity_train = np.expand_dims(propensity_train, 1)
         if y_train.ndim == 1:
             y_train = np.expand_dims(y_train, 1)
         if X_test is not None:
@@ -597,9 +597,9 @@ def sample(
         if Z_test is not None:
             if Z_test.ndim == 1:
                 Z_test = np.expand_dims(Z_test, 1)
-        if pi_test is not None:
-            if pi_test.ndim == 1:
-                pi_test = np.expand_dims(pi_test, 1)
+        if propensity_test is not None:
+            if propensity_test.ndim == 1:
+                propensity_test = np.expand_dims(propensity_test, 1)
         if rfx_group_ids_train is not None:
             if rfx_group_ids_train.ndim != 1:
                 rfx_group_ids_train = np.squeeze(rfx_group_ids_train)
@@ -631,17 +631,17 @@ def sample(
             raise ValueError("X_train and Z_train must have the same number of rows")
         if y_train.shape[0] != X_train.shape[0]:
             raise ValueError("X_train and y_train must have the same number of rows")
-        if pi_train is not None:
-            if pi_train.shape[0] != X_train.shape[0]:
+        if propensity_train is not None:
+            if propensity_train.shape[0] != X_train.shape[0]:
                 raise ValueError(
-                    "X_train and pi_train must have the same number of rows"
+                    "X_train and propensity_train must have the same number of rows"
                 )
         if X_test is not None and Z_test is not None:
             if X_test.shape[0] != Z_test.shape[0]:
                 raise ValueError("X_test and Z_test must have the same number of rows")
-        if X_test is not None and pi_test is not None:
-            if X_test.shape[0] != pi_test.shape[0]:
-                raise ValueError("X_test and pi_test must have the same number of rows")
+        if X_test is not None and propensity_test is not None:
+            if X_test.shape[0] != propensity_test.shape[0]:
+                raise ValueError("X_test and propensity_test must have the same number of rows")

         # Raise a warning if the data have ties and only GFR is being run
         if (num_gfr > 0) and (num_burnin == 0) and (num_mcmc == 0):
@@ -1311,10 +1311,10 @@ def sample(
             sample_sigma2_leaf_tau = False

         # Check if user has provided propensities that are needed in the model
-        if pi_train is None and propensity_covariate != "none":
+        if propensity_train is None and propensity_covariate != "none":
             if self.multivariate_treatment:
                 raise ValueError(
-                    "Propensities must be provided (via pi_train and / or pi_test parameters) or omitted by setting propensity_covariate = 'none' for multivariate treatments"
+                    "Propensities must be provided (via propensity_train and / or propensity_test parameters) or omitted by setting propensity_covariate = 'none' for multivariate treatments"
                 )
             else:
                 self.bart_propensity_model = BARTModel()
@@ -1330,10 +1330,10 @@ def sample(
                         num_burnin=num_burnin_propensity,
                         num_mcmc=num_mcmc_propensity,
                     )
-                    pi_train = np.mean(
+                    propensity_train = np.mean(
                         self.bart_propensity_model.y_hat_train, axis=1, keepdims=True
                     )
-                    pi_test = np.mean(
+                    propensity_test = np.mean(
                         self.bart_propensity_model.y_hat_test, axis=1, keepdims=True
                     )
                 else:
@@ -1344,7 +1344,7 @@ def sample(
                         num_burnin=num_burnin_propensity,
                         num_mcmc=num_mcmc_propensity,
                     )
-                    pi_train = np.mean(
+                    propensity_train = np.mean(
                         self.bart_propensity_model.y_hat_train, axis=1, keepdims=True
                     )
                 self.internal_propensity_model = True
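
The hunks above also cover the fallback path: when no propensities are supplied, a BARTModel is fit with the treatment as the outcome and its posterior-mean prediction is used as the propensity. A rough sketch of doing that step by hand, so the result can be passed in explicitly via propensity_train (it reuses only the BARTModel keywords and attributes visible above; the data and the number of draws are assumptions):

# Sketch: estimate propensities externally and pass them via propensity_train.
# Mirrors the internal logic above; draw counts are illustrative, not defaults.
import numpy as np
from stochtree import BARTModel

rng = np.random.default_rng(2024)
X = rng.uniform(size=(500, 5))
Z = rng.binomial(1, 1.0 / (1.0 + np.exp(-(X[:, 0] - 0.5)))).astype(float)

propensity_model = BARTModel()
propensity_model.sample(
    X_train=X,              # covariates only
    y_train=np.squeeze(Z),  # treatment regressed on covariates
    num_gfr=10,
    num_burnin=0,
    num_mcmc=100,
)
# Average over retained posterior draws, kept as an (n, 1) column.
propensity_train = np.mean(propensity_model.y_hat_train, axis=1, keepdims=True)
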
@@ -1674,34 +1674,34 @@ def sample(
         )
         if propensity_covariate != "none":
             feature_types = np.append(
-                feature_types, np.repeat(0, pi_train.shape[1])
+                feature_types, np.repeat(0, propensity_train.shape[1])
             ).astype("int")
-            X_train_processed = np.c_[X_train_processed, pi_train]
+            X_train_processed = np.c_[X_train_processed, propensity_train]
             if self.has_test:
-                X_test_processed = np.c_[X_test_processed, pi_test]
+                X_test_processed = np.c_[X_test_processed, propensity_test]
             if propensity_covariate == "prognostic":
                 variable_weights_mu = np.append(
-                    variable_weights_mu, np.repeat(1 / num_cov_orig, pi_train.shape[1])
+                    variable_weights_mu, np.repeat(1 / num_cov_orig, propensity_train.shape[1])
                 )
                 variable_weights_tau = np.append(
-                    variable_weights_tau, np.repeat(0.0, pi_train.shape[1])
+                    variable_weights_tau, np.repeat(0.0, propensity_train.shape[1])
                 )
             elif propensity_covariate == "treatment_effect":
                 variable_weights_mu = np.append(
-                    variable_weights_mu, np.repeat(0.0, pi_train.shape[1])
+                    variable_weights_mu, np.repeat(0.0, propensity_train.shape[1])
                 )
                 variable_weights_tau = np.append(
-                    variable_weights_tau, np.repeat(1 / num_cov_orig, pi_train.shape[1])
+                    variable_weights_tau, np.repeat(1 / num_cov_orig, propensity_train.shape[1])
                 )
             elif propensity_covariate == "both":
                 variable_weights_mu = np.append(
-                    variable_weights_mu, np.repeat(1 / num_cov_orig, pi_train.shape[1])
+                    variable_weights_mu, np.repeat(1 / num_cov_orig, propensity_train.shape[1])
                 )
                 variable_weights_tau = np.append(
-                    variable_weights_tau, np.repeat(1 / num_cov_orig, pi_train.shape[1])
+                    variable_weights_tau, np.repeat(1 / num_cov_orig, propensity_train.shape[1])
                 )
             variable_weights_variance = np.append(
-                variable_weights_variance, np.repeat(0.0, pi_train.shape[1])
+                variable_weights_variance, np.repeat(0.0, propensity_train.shape[1])
             )

         # Renormalize variable weights
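
The last hunk is the same rename inside the block that appends the propensity column(s) to the processed covariates and sets their split weights: the propensity gets a positive weight in the prognostic (mu) forest, the treatment-effect (tau) forest, or both, depending on propensity_covariate, and always weight 0.0 in the variance forest, with all weights renormalized afterwards. A small illustration of that bookkeeping (sizes and initial weights are made up):

# Illustration of the variable-weight bookkeeping above for
# propensity_covariate="both"; num_cov_orig, shapes, and the starting weights
# are made-up examples, not library defaults.
import numpy as np

num_cov_orig = 5
propensity_train = np.zeros((100, 1))  # only the column count matters here

variable_weights_mu = np.repeat(1 / num_cov_orig, num_cov_orig)
variable_weights_tau = np.repeat(1 / num_cov_orig, num_cov_orig)

# "both": the propensity column is split-eligible in the prognostic (mu) and
# treatment-effect (tau) forests; the variance forest always gets weight 0.0.
variable_weights_mu = np.append(
    variable_weights_mu, np.repeat(1 / num_cov_orig, propensity_train.shape[1])
)
variable_weights_tau = np.append(
    variable_weights_tau, np.repeat(1 / num_cov_orig, propensity_train.shape[1])
)
# Weights are renormalized downstream, so only their relative magnitudes matter.
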