|
 from timeit import default_timer as timer
 
 import numpy as np
+import tensorflow as tf
 from loguru import logger
 from scipy.stats import norm
 from sklearn.linear_model import LinearRegression
 
 from . import constants
-from .multi_partner_learning import basic_mpl
+from .multi_partner_learning import basic_mpl, fast_mpl
 
 
 class KrigingModel:
@@ -1113,23 +1114,44 @@ def compute_relative_perf_matrix(self):
 
         return relative_perf_matrix
 
-    def s_model(self):
+    def statistical_distances_via_smodel(self):
         start = timer()
-        mpl = basic_mpl.FedAvgSmodel(self.scenario)
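+        # Train the federated model once, together with one S-model per partner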
+        mpl = fast_mpl.FastFedAvgSmodel(self.scenario, self.scenario.mpl.pretrain_epochs)
         mpl.fit()
-        theta_estimated = np.zeros((mpl.partners_count,
-                                    mpl.dataset.num_classes,
-                                    mpl.dataset.num_classes))
+        cross_entropy = tf.keras.metrics.CategoricalCrossentropy()
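+        # One list of per-partner scores for each statistical distance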
+        self.contributivity_scores = {'Kullback-Leibler divergence': [0 for _ in mpl.partners_list],
+                                      'Bhattacharyya distance': [0 for _ in mpl.partners_list],
+                                      'Hellinger metric': [0 for _ in mpl.partners_list]}
         for i, partnerMpl in enumerate(mpl.partners_list):
-            theta_estimated[i] = (np.exp(partnerMpl.noise_layer_weights) / np.sum(
-                np.exp(partnerMpl.noise_layer_weights), axis=2))
-        self.contributivity_scores = np.exp(- np.array([np.linalg.norm(
-            theta_estimated[i] - np.identity(mpl.dataset.num_classes)
-            ) for i in range(len(self.scenario.partners_list))]))
-
-        self.name = "S-Model"
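+            # y_global: predictions of the shared model on this partner's data;
+            # y_local: the same predictions passed through the partner's S-model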
+            y_global = mpl.model.predict(partnerMpl.x_train)
+            y_local = mpl.smodel_list[i].predict(y_global)
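+            # KL(y_global || y_local) = H(y_global, y_local) - H(y_global),
+            # estimated from two cross-entropy measurements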
+            cross_entropy.update_state(y_global, y_local)
+            cs = cross_entropy.result().numpy()
+            cross_entropy.reset_state()
+            cross_entropy.update_state(y_global, y_global)
+            e = cross_entropy.result().numpy()
+            cross_entropy.reset_state()
+            self.contributivity_scores['Kullback-Leibler divergence'][i] = cs - e
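+            # Bhattacharyya coefficient: mean over samples of sum_k sqrt(y_g[k] * y_l[k])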
+            BC = 0
+            for y_g, y_l in zip(y_global, y_local):
+                BC += np.sum(np.sqrt(y_g * y_l))
+            BC /= len(y_global)
+            self.contributivity_scores['Bhattacharyya distance'][i] = - np.log(BC)
+            self.contributivity_scores['Hellinger metric'][i] = np.sqrt(1 - BC)
+
+        self.name = "Statistical metrics via S-model"
         self.scores_std = np.zeros(mpl.partners_count)
-        self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
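+        # Normalize each distance's scores so they sum to one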
+        self.normalized_scores = {}
+        for key, value in self.contributivity_scores.items():
+            self.normalized_scores[key] = np.array(value) / np.sum(value)
         end = timer()
         self.computation_time_sec = end - start
 
|
|