
Commit 647674d

enable automatic installation of dependencies
1 parent 981d47a commit 647674d

5 files changed: +77, -61 lines

pymic/loss/seg/ssl.py

Lines changed: 3 additions & 2 deletions
@@ -6,8 +6,9 @@
 import torch.nn as nn
 import numpy as np
 from pymic.loss.seg.util import reshape_tensor_to_2D
+from pymic.loss.seg.abstract import AbstractSegLoss

-class EntropyLoss(nn.Module):
+class EntropyLoss(AbstractSegLoss):
     """
     Entropy Minimization for segmentation tasks.
     The parameters should be written in the `params` dictionary, and it has the
@@ -43,7 +44,7 @@ def forward(self, loss_input_dict):
         avg_ent = torch.mean(entropy)
         return avg_ent

-class TotalVariationLoss(nn.Module):
+class TotalVariationLoss(AbstractSegLoss):
     """
     Total Variation Loss for segmentation tasks.
     The parameters should be written in the `params` dictionary, and it has the
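
For context, both classes implement unsupervised regularization terms commonly used for semi-supervised segmentation. The sketch below illustrates the quantities they compute on a softmax output; it is a simplified illustration of the idea rather than PyMIC's implementation, and the softmax placement and log epsilon are assumptions.

# Simplified sketch of the two regularizers (not PyMIC's exact code).
import torch
import torch.nn as nn

def entropy_loss(logits):
    """Mean pixel-wise entropy of the softmax prediction; logits has shape [N, C, H, W]."""
    prob = nn.Softmax(dim=1)(logits)
    entropy = -torch.sum(prob * torch.log(prob + 1e-5), dim=1)
    return torch.mean(entropy)

def total_variation_loss(logits):
    """Mean absolute difference between neighboring pixels of the softmax output."""
    prob = nn.Softmax(dim=1)(logits)
    tv_h = torch.abs(prob[:, :, 1:, :] - prob[:, :, :-1, :]).mean()
    tv_w = torch.abs(prob[:, :, :, 1:] - prob[:, :, :, :-1]).mean()
    return tv_h + tv_w

logits = torch.randn(2, 4, 64, 64)   # batch of 2 images, 4 classes
print(entropy_loss(logits).item(), total_variation_loss(logits).item())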

pymic/util/evaluation_seg.py

Lines changed: 59 additions & 56 deletions
@@ -260,14 +260,15 @@ def evaluation(config):
     Run evaluation of segmentation results based on a configuration dictionary `config`.
     The following fields should be provided in `config`:

-    :param metric: (str) The metric for evaluation.
+    :param metric_list: (list) The list of metrics for evaluation.
         The metric options are {`dice`, `iou`, `assd`, `hd95`, `rve`, `volume`}.
     :param label_list: (list) The list of labels for evaluation.
     :param label_fuse: (option, bool) If true, fuse the labels in the `label_list`
         as the foreground, and other labels as the background. Default is False.
     :param organ_name: (str) The name of the organ for segmentation.
     :param ground_truth_folder_root: (str) The root dir of ground truth images.
-    :param segmentation_folder_root: (str) The root dir of segmentation images.
+    :param segmentation_folder_root: (str or list) The root dir of segmentation images.
+        When a list is given, each list element should be the root dir of the results of one method.
     :param evaluation_image_pair: (str) The csv file that provide the segmentation
         images and the corresponding ground truth images.
     :param ground_truth_label_convert_source: (optional, list) The list of source
@@ -280,7 +281,7 @@ def evaluation(config):
         labels for label conversion in the segmentation.
     """

-    metric = config['metric']
+    metric_list = config['metric_list']
     label_list = config['label_list']
     label_fuse = config.get('label_fuse', False)
     organ_name = config['organ_name']
@@ -295,60 +296,62 @@ def evaluation(config):
     segmentation_label_convert_target = config.get('segmentation_label_convert_target', None)

     image_items = pd.read_csv(image_pair_csv)
-    item_num = len(image_items)
-    for seg_root_n in seg_root:
-        score_all_data = []
-        name_score_list= []
-        for i in range(item_num):
-            gt_name  = image_items.iloc[i, 0]
-            seg_name = image_items.iloc[i, 1]
-            # seg_name = seg_name.replace(".nii.gz", "_pred.nii.gz")
-            gt_full_name  = gt_root + '/' + gt_name
-            seg_full_name = seg_root_n + '/' + seg_name
-
-            s_dict = load_image_as_nd_array(seg_full_name)
-            g_dict = load_image_as_nd_array(gt_full_name)
-            s_volume = s_dict["data_array"]; s_spacing = s_dict["spacing"]
-            g_volume = g_dict["data_array"]; g_spacing = g_dict["spacing"]
-            # for dim in range(len(s_spacing)):
-            #     assert(s_spacing[dim] == g_spacing[dim])
-            if((ground_truth_label_convert_source is not None) and \
-                ground_truth_label_convert_target is not None):
-                g_volume = convert_label(g_volume, ground_truth_label_convert_source, \
-                    ground_truth_label_convert_target)
-
-            if((segmentation_label_convert_source is not None) and \
-                segmentation_label_convert_target is not None):
-                s_volume = convert_label(s_volume, segmentation_label_convert_source, \
-                    segmentation_label_convert_target)
-
-            score_vector = get_multi_class_evaluation_score(s_volume, g_volume, label_list,
-                label_fuse, s_spacing, metric)
-            if(len(label_list) > 1):
-                score_vector.append(np.asarray(score_vector).mean())
-            score_all_data.append(score_vector)
-            name_score_list.append([seg_name] + score_vector)
-            print(seg_name, score_vector)
-        score_all_data = np.asarray(score_all_data)
-        score_mean = score_all_data.mean(axis = 0)
-        score_std  = score_all_data.std(axis = 0)
-        name_score_list.append(['mean'] + list(score_mean))
-        name_score_list.append(['std'] + list(score_std))
+    item_num = len(image_items)

-        # save the result as csv
-        score_csv = "{0:}/{1:}_{2:}_all.csv".format(seg_root_n, organ_name, metric)
-        with open(score_csv, mode='w') as csv_file:
-            csv_writer = csv.writer(csv_file, delimiter=',',
-                quotechar='"', quoting=csv.QUOTE_MINIMAL)
-            head = ['image'] + ["class_{0:}".format(i) for i in label_list]
-            if(len(label_list) > 1):
-                head = head + ["average"]
-            csv_writer.writerow(head)
-            for item in name_score_list:
-                csv_writer.writerow(item)
-
-        print("{0:} mean ".format(metric), score_mean)
-        print("{0:} std ".format(metric), score_std)
+    for seg_root_n in seg_root:  # for each segmentation method
+        for metric in metric_list:
+            score_all_data = []
+            name_score_list= []
+            for i in range(item_num):
+                gt_name  = image_items.iloc[i, 0]
+                seg_name = image_items.iloc[i, 1]
+                # seg_name = seg_name.replace(".nii.gz", "_pred.nii.gz")
+                gt_full_name  = gt_root + '/' + gt_name
+                seg_full_name = seg_root_n + '/' + seg_name
+
+                s_dict = load_image_as_nd_array(seg_full_name)
+                g_dict = load_image_as_nd_array(gt_full_name)
+                s_volume = s_dict["data_array"]; s_spacing = s_dict["spacing"]
+                g_volume = g_dict["data_array"]; g_spacing = g_dict["spacing"]
+                # for dim in range(len(s_spacing)):
+                #     assert(s_spacing[dim] == g_spacing[dim])
+                if((ground_truth_label_convert_source is not None) and \
+                    ground_truth_label_convert_target is not None):
+                    g_volume = convert_label(g_volume, ground_truth_label_convert_source, \
+                        ground_truth_label_convert_target)
+
+                if((segmentation_label_convert_source is not None) and \
+                    segmentation_label_convert_target is not None):
+                    s_volume = convert_label(s_volume, segmentation_label_convert_source, \
+                        segmentation_label_convert_target)
+
+                score_vector = get_multi_class_evaluation_score(s_volume, g_volume, label_list,
+                    label_fuse, s_spacing, metric)
+                if(len(label_list) > 1):
+                    score_vector.append(np.asarray(score_vector).mean())
+                score_all_data.append(score_vector)
+                name_score_list.append([seg_name] + score_vector)
+                print(seg_name, score_vector)
+            score_all_data = np.asarray(score_all_data)
+            score_mean = score_all_data.mean(axis = 0)
+            score_std  = score_all_data.std(axis = 0)
+            name_score_list.append(['mean'] + list(score_mean))
+            name_score_list.append(['std'] + list(score_std))
+
+            # save the result as csv
+            score_csv = "{0:}/{1:}_{2:}_all.csv".format(seg_root_n, organ_name, metric)
+            with open(score_csv, mode='w') as csv_file:
+                csv_writer = csv.writer(csv_file, delimiter=',',
+                    quotechar='"', quoting=csv.QUOTE_MINIMAL)
+                head = ['image'] + ["class_{0:}".format(i) for i in label_list]
+                if(len(label_list) > 1):
+                    head = head + ["average"]
+                csv_writer.writerow(head)
+                for item in name_score_list:
+                    csv_writer.writerow(item)
+
+            print("{0:} mean ".format(metric), score_mean)
+            print("{0:} std ".format(metric), score_std)

 def main():
     """

pymic/util/post_process.py

Lines changed: 1 addition & 0 deletions
@@ -43,6 +43,7 @@ def __call__(self, seg):
             seg_c = np.asarray(seg == c, np.uint8)
             seg_c = get_largest_k_components(seg_c)
             output = output + seg_c * c
+        seg = output
         return seg

 PostProcessDict = {
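
The added `seg = output` line makes `__call__` return the rebuilt volume instead of the unmodified input, so keeping only the largest component of each class now actually takes effect. The function below is a simplified, self-contained sketch of that pattern, using `scipy.ndimage` instead of PyMIC's `get_largest_k_components`; it only illustrates why the assignment matters.

# Simplified sketch of per-class largest-component post-processing.
import numpy as np
from scipy import ndimage

def keep_largest_component_per_class(seg):
    output = np.zeros_like(seg)
    for c in range(1, seg.max() + 1):
        seg_c = np.asarray(seg == c, np.uint8)
        labeled, num = ndimage.label(seg_c)          # connected components of class c
        if num > 0:
            sizes   = ndimage.sum(seg_c, labeled, range(1, num + 1))
            largest = np.asarray(labeled == (np.argmax(sizes) + 1), np.uint8)
            output  = output + largest * c
    seg = output   # without this assignment the input would be returned unchanged
    return seg

seg = np.zeros((32, 32), np.uint8)
seg[2:6, 2:6] = 1; seg[20:30, 20:30] = 1             # two components of class 1
print(keep_largest_component_per_class(seg).sum())   # only the larger one remains -> 100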

requirements.txt

Lines changed: 2 additions & 2 deletions
@@ -7,5 +7,5 @@ scipy>=1.3.3
 SimpleITK>=2.0.0
 tensorboard>=2.1.0
 tensorboardX>=1.9
-torch>=1.7.1
-torchvision>=0.8.2
+torch>=1.1.12
+torchvision>=0.13.0

setup.py

Lines changed: 12 additions & 1 deletion
@@ -11,7 +11,7 @@

 setuptools.setup(
     name = 'PYMIC',
-    version = "0.3.0",
+    version = "0.3.1",
     author ='PyMIC Consortium',
     author_email = 'wguotai@gmail.com',
     description = description,
@@ -20,6 +20,17 @@
     url = 'https://github.com/HiLab-git/PyMIC',
     license = 'Apache 2.0',
     packages = setuptools.find_packages(),
+    install_requires=[
+        "matplotlib>=3.1.2",
+        "numpy>=1.17.4",
+        "pandas>=0.25.3",
+        "scikit-image>=0.16.2",
+        "scikit-learn>=0.22",
+        "scipy>=1.3.3",
+        "SimpleITK>=2.0.0",
+        "tensorboard>=2.1.0",
+        "tensorboardX>=1.9",
+    ],
     classifiers=[
         'License :: OSI Approved :: Apache Software License',
         'Programming Language :: Python',
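
With `install_requires` declared, installing the package (for example with `pip install .` from the repository root) now pulls in the listed dependencies automatically, which is what the commit message refers to. As an optional check after installation, the declared requirements can be read back with the standard library; the distribution name 'PYMIC' is taken from the `name` field above.

# Optional check: list the dependencies declared by the installed distribution.
from importlib import metadata

for req in metadata.requires('PYMIC') or []:
    print(req)   # e.g. "matplotlib>=3.1.2", "numpy>=1.17.4", ...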
