|
10 | 10 | """ |
11 | 11 | Output a summary table for neuroimaging files (resolution, dimensionality, etc.) |
12 | 12 | """ |
13 | | -from __future__ import division, print_function, absolute_import |
14 | 13 |
|
15 | | -import re |
16 | | -import sys |
17 | | - |
18 | | -import numpy as np |
19 | | -import nibabel as nib |
20 | | - |
21 | | -from math import ceil |
22 | | -from optparse import OptionParser, Option |
23 | | -from io import StringIO |
24 | | -from nibabel.py3k import asunicode |
25 | | - |
26 | | -__author__ = 'Yaroslav Halchenko' |
27 | | -__copyright__ = 'Copyright (c) 2011-2016 Yaroslav Halchenko ' \ |
28 | | - 'and NiBabel contributors' |
29 | | -__license__ = 'MIT' |
30 | | - |
31 | | - |
# global verbosity switch -- set from the --verbose count in main()
verbose_level = 0
MAX_UNIQUE = 1000  # maximal number of unique values to report for --counts
35 | | - |
36 | | -def _err(msg=None): |
37 | | - """To return a string to signal "error" in output table""" |
38 | | - if msg is None: |
39 | | - msg = 'error' |
40 | | - return '!' + msg |
41 | | - |
def verbose(l, msg):
    """Print *msg* indented by level *l* when *l* does not exceed the
    module-level ``verbose_level`` switch.
    """
    # TODO: consider using nibabel's logger
    if int(verbose_level) >= l:
        print(' ' * l + msg)
48 | | - |
49 | | - |
def error(msg, exit_code):
    """Print *msg* to standard error and terminate with *exit_code*.

    The original used the Python 2 ``print >> sys.stderr, msg``
    statement, which is a runtime TypeError in a module importing
    ``print_function``; use the print function's ``file`` argument.
    """
    print(msg, file=sys.stderr)
    sys.exit(exit_code)
53 | | - |
54 | | - |
def table2string(table, out=None):
    """Given list of lists figure out their common widths and print to out

    Parameters
    ----------
    table : list of lists of strings
        What is aimed to be printed.  A cell may carry an alignment
        prefix: ``@l`` (left), ``@r`` (right), ``@c`` (center, the
        default) or ``@w`` (wide -- excluded from width computation).
    out : None or stream
        Where to print. If None -- will print and return string

    Returns
    -------
    string if out was None
    """
    print2string = out is None
    if print2string:
        out = StringIO()

    # equalize number of elements in each row
    nelements_max = \
        len(table) and \
        max(len(x) for x in table)

    for i, table_ in enumerate(table):
        table[i] += [''] * (nelements_max - len(table_))

    # figure out lengths within each column
    atable = np.asarray(table)
    # eat whole entry while computing width for @w (for wide)
    markup_strip = re.compile('^@([lrc]|w.*)')
    col_width = [max([len(markup_strip.sub('', x))
                      for x in column]) for column in atable.T]
    string = ""
    for i, table_ in enumerate(table):
        string_ = ""
        for j, item in enumerate(table_):
            item = str(item)
            if item.startswith('@'):
                align = item[1]
                item = item[2:]
                if align not in ['l', 'r', 'c', 'w']:
                    raise ValueError('Unknown alignment %s. Known are l,r,c' %
                                     align)
            else:
                align = 'c'

            # split the padding between both sides for centering
            nspacesl = max(int(ceil((col_width[j] - len(item)) / 2.0)), 0)
            nspacesr = max(col_width[j] - nspacesl - len(item), 0)

            if align in ['w', 'c']:
                pass
            elif align == 'l':
                nspacesl, nspacesr = 0, nspacesl + nspacesr
            elif align == 'r':
                nspacesl, nspacesr = nspacesl + nspacesr, 0
            else:
                raise RuntimeError('Should not get here with align=%s' % align)

            string_ += "%%%ds%%s%%%ds " \
                       % (nspacesl, nspacesr) % ('', item, '')
        string += string_.rstrip() + '\n'
    # the original routed this through the now-removed nibabel.py3k
    # asunicode() shim; on Python 3 ``string`` is already str
    out.write(string)

    if print2string:
        value = out.getvalue()
        out.close()
        return value
123 | | - |
124 | | - |
def ap(l, format_, sep=', '):
    """Little helper to enforce consistency"""
    # '-' is the sentinel safe_get() uses for "value unavailable"
    if l == '-':
        return l
    return sep.join(format_ % item for item in l)
131 | | - |
132 | | - |
def safe_get(obj, name):
    """Call ``obj.get_<name>()``, returning '-' if anything fails."""
    try:
        getter = getattr(obj, 'get_' + name)
    except Exception as e:
        verbose(2, "get_%s() failed -- %s" % (name, e))
        return '-'
    try:
        return getter()
    except Exception as e:
        verbose(2, "get_%s() failed -- %s" % (name, e))
        return '-'
142 | | - |
143 | | - |
def get_opt_parser():
    """Build the option parser, using the module docstring for --help."""
    parser = OptionParser(
        usage="%s [OPTIONS] [FILE ...]\n\n" % sys.argv[0] + __doc__,
        version="%prog " + nib.__version__)

    parser.add_option(
        "-v", "--verbose", action="count",
        dest="verbose", default=0,
        help="Make more noise. Could be specified multiple times")
    parser.add_option(
        "-H", "--header-fields",
        dest="header_fields", default='',
        help="Header fields (comma separated) to be printed as well (if present)")
    parser.add_option(
        "-s", "--stats",
        action="store_true", dest='stats', default=False,
        help="Output basic data statistics")
    parser.add_option(
        "-c", "--counts",
        action="store_true", dest='counts', default=False,
        help="Output counts - number of entries for each numeric value "
             "(useful for int ROI maps)")
    parser.add_option(
        "--all-counts",
        action="store_true", dest='all_counts', default=False,
        help="Output all counts, even if number of unique values > %d" % MAX_UNIQUE)
    parser.add_option(
        "-z", "--zeros",
        action="store_true", dest='stats_zeros', default=False,
        help="Include zeros into output basic data statistics (--stats, --counts)")

    return parser
178 | | - |
179 | | - |
def proc_file(f, opts):
    """Build one summary-table row for a single neuroimaging file.

    Parameters
    ----------
    f : str
        Filename to load with nibabel.
    opts : optparse.Values
        Parsed options (``header_fields``, ``stats``, ``counts``,
        ``all_counts``, ``stats_zeros``).

    Returns
    -------
    list of str
        Cells for one table row; cells may carry the ``@l`` alignment
        markup understood by table2string().
    """
    verbose(1, "Loading %s" % f)

    row = ["@l%s" % f]
    try:
        vol = nib.load(f)
        h = vol.header
    except Exception as e:
        row += ['failed']
        verbose(2, "Failed to gather information -- %s" % str(e))
        return row

    row += [str(safe_get(h, 'data_dtype')),
            '@l[%s]' % ap(safe_get(h, 'data_shape'), '%3g'),
            '@l%s' % ap(safe_get(h, 'zooms'), '%.2f', 'x')]
    # Slope: report only when a non-trivial scaling is present
    if hasattr(h, 'has_data_slope') and \
       (h.has_data_slope or h.has_data_intercept) and \
       not h.get_slope_inter() in [(1.0, 0.0), (None, None)]:
        row += ['@l*%.3g+%.3g' % h.get_slope_inter()]
    else:
        row += ['']

    if hasattr(h, 'extensions') and len(h.extensions):
        row += ['@l#exts: %d' % len(h.extensions)]
    else:
        row += ['']

    if opts.header_fields:
        # signals "all fields"
        if opts.header_fields == 'all':
            # TODO: might vary across file types, thus prior sensing
            # would be needed
            header_fields = h.keys()
        else:
            header_fields = opts.header_fields.split(',')

        # loop variable renamed from ``f``, which shadowed the filename
        # parameter above
        for field in header_fields:
            if not field:  # skip empty
                continue
            try:
                row += [str(h[field])]
            except (KeyError, ValueError):
                row += [_err()]

    try:
        if (hasattr(h, 'get_qform') and hasattr(h, 'get_sform') and
                (h.get_qform() != h.get_sform()).any()):
            row += ['sform']
        else:
            row += ['']
    except Exception as e:
        verbose(2, "Failed to obtain qform or sform -- %s" % str(e))
        if isinstance(h, nib.AnalyzeHeader):
            row += ['']
        else:
            row += [_err()]

    if opts.stats or opts.counts:
        # We are doomed to load data
        try:
            d = vol.get_data()
            if not opts.stats_zeros:
                d = d[np.nonzero(d)]
            else:
                # at least flatten it -- functionality below doesn't
                # depend on the original shape, so let's use a flat view
                d = d.reshape(-1)
            if opts.stats:
                # just # of elements
                row += ["@l[%d]" % np.prod(d.shape)]
                # stats
                row += [len(d) and '@l[%.2g, %.2g]' % (np.min(d), np.max(d)) or '-']
            if opts.counts:
                items, inv = np.unique(d, return_inverse=True)
                # use MAX_UNIQUE (was a duplicated magic 1000) so the
                # threshold stays in sync with the --all-counts help text
                if len(items) > MAX_UNIQUE and not opts.all_counts:
                    counts = _err("%d uniques. Use --all-counts" % len(items))
                else:
                    freq = np.bincount(inv)
                    counts = " ".join("%g:%d" % (val, cnt)
                                      for val, cnt in zip(items, freq))
                row += ["@l" + counts]
        except IOError as e:
            verbose(2, "Failed to obtain stats/counts -- %s" % str(e))
            row += [_err()]
    return row
265 | | - |
266 | | - |
def main():
    """Show must go on"""
    parser = get_opt_parser()
    opts, files = parser.parse_args()

    global verbose_level
    verbose_level = opts.verbose

    # suppress nibabel format-compliance warnings unless very verbose
    if verbose_level < 3:
        nib.imageglobals.logger.level = 50

    print(table2string([proc_file(fname, opts) for fname in files]))
| 14 | +from nibabel.cmdline.ls import main |
283 | 15 |
|
284 | 16 |
|
285 | 17 | if __name__ == '__main__': |
|
0 commit comments