This file is indexed.

/usr/share/pyshared/nibabel/parrec.py is in python-nibabel 1.2.2-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
#   See COPYING file distributed along with the NiBabel package for the
#   copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Humble attempt to read images in PAR/REC format.

This is yet another MRI image format, generated by Philips
scanners. It consists of an ASCII header (PAR) plus a binary blob (REC).

This implementation aims to read version 4.2 of this format. Other versions
could probably be supported, but the author lacks samples of them.
"""

import warnings
import numpy as np
import copy

from .spatialimages import SpatialImage, Header
from .eulerangles import euler2mat
from .volumeutils import Recoder

# PAR header versions we claim to understand
supported_versions = ['V4.2']

# assign props to PAR header entries
# values are: (shortname[, dtype[, shape]])
_hdr_key_dict = {
    'Patient name': ('patient_name',),
    'Examination name': ('exam_name',),
    'Protocol name': ('protocol_name',),
    'Examination date/time': ('exam_date',),
    'Series Type': ('series_type',),
    'Acquisition nr': ('acq_nr', int),
    'Reconstruction nr': ('recon_nr', int),
    'Scan Duration [sec]': ('scan_duration', float),
    'Max. number of cardiac phases': ('max_cardiac_phases', int),
    'Max. number of echoes': ('max_echoes', int),
    'Max. number of slices/locations': ('max_slices', int),
    'Max. number of dynamics': ('max_dynamics', int),
    'Max. number of mixes': ('max_mixes', int),
    'Patient position': ('patient_position',),
    'Preparation direction': ('prep_direction',),
    'Technique': ('tech',),
    'Scan resolution  (x, y)': ('scan_resolution', int, (2,)),
    'Scan mode': ('scan_mode',),
    'Repetition time [ms]': ('repetition_time', float),
    'FOV (ap,fh,rl) [mm]': ('fov', float, (3,)),
    'Water Fat shift [pixels]': ('water_fat_shift', float),
    'Angulation midslice(ap,fh,rl)[degr]': ('angulation', float, (3,)),
    'Off Centre midslice(ap,fh,rl) [mm]': ('off_center', float, (3,)),
    'Flow compensation <0=no 1=yes> ?': ('flow_compensation', int),
    'Presaturation     <0=no 1=yes> ?': ('presaturation', int),
    'Phase encoding velocity [cm/sec]': ('phase_enc_velocity', float, (3,)),
    'MTC               <0=no 1=yes> ?': ('mtc', int),
    'SPIR              <0=no 1=yes> ?': ('spir', int),
    'EPI factor        <0,1=no EPI>': ('epi_factor', int),
    'Dynamic scan      <0=no 1=yes> ?': ('dyn_scan', int),
    'Diffusion         <0=no 1=yes> ?': ('diffusion', int),
    'Diffusion echo time [ms]': ('diffusion_echo_time', float),
    'Max. number of diffusion values': ('max_diffusion_values', int),
    'Max. number of gradient orients': ('max_gradient_orient', int),
    'Number of label types   <0=no ASL>': ('nr_label_types', int),
    }

# header items order per image definition line
image_def_dtd = [
    ('slice number', int),
    ('echo number', int,),
    ('dynamic scan number', int,),
    ('cardiac phase number', int,),
    ('image_type_mr', int,),
    ('scanning sequence', int,),
    ('index in REC file', int,),
    ('image pixel size', int,),
    ('scan percentage', int,),
    ('recon resolution', int, (2,)),
    ('rescale intercept', float),
    ('rescale slope', float),
    ('scale slope', float),
    ('window center', int,),
    ('window width', int,),
    ('image angulation', float, (3,)),
    ('image offcentre', float, (3,)),
    ('slice thickness', float),
    ('slice gap', float),
    ('image_display_orientation', int,),
    ('slice orientation', int,),
    ('fmri_status_indication', int,),
    ('image_type_ed_es', int,),
    ('pixel spacing', float, (2,)),
    ('echo_time', float),
    ('dyn_scan_begin_time', float),
    ('trigger_time', float),
    ('diffusion_b_factor', float),
    ('number of averages', int,),
    ('image_flip_angle', float),
    ('cardiac frequency', int,),
    ('minimum RR-interval', int,),
    ('maximum RR-interval', int,), 
    ('TURBO factor', int,),
    ('Inversion delay', float),
    ('diffusion b value number', int,),    # (imagekey!)
    ('gradient orientation number', int,), # (imagekey!)
    ('contrast type', 'S30'),              # XXX might be too short?
    ('diffusion anisotropy type', 'S30'),  # XXX might be too short?
    ('diffusion', float, (3,)),
    ('label type', int,),                  # (imagekey!)
    ]
image_def_dtype = np.dtype(image_def_dtd)

# slice orientation codes
slice_orientation_codes = Recoder((# code, label
    (1, 'transversal'),
    (2, 'sagital'),
    (3, 'coronal')), fields=('code', 'label'))
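
# Recoder provides mappings between codes and labels in either direction.
# A small sketch (values from the table above; not executed here):
#
#     slice_orientation_codes.label[1]         # -> 'transversal'
#     slice_orientation_codes.code['coronal']  # -> 3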


class PARRECError(Exception):
    """Exception for PAR/REC format related problems.

    To be raised whenever PAR/REC is not happy, or we are not happy with
    PAR/REC.
    """
    pass


def parse_PAR_header(fobj):
    """Parse a PAR header and aggregate all information into useful containers.

    Parameters
    ----------
    fobj : file-object
      The PAR header file object.

    Returns
    -------
    (dict, array)
      The dictionary contains all "General Information" from the header file,
      while the (structured) array has the properties of all image definitions
      in the header.
    """
    # containers for relevant header lines
    general_info = {}
    image_info = []
    version = None

    # single pass through the header
    for line in fobj:
        # no junk
        line = line.strip()
        if line.startswith('#'):
            # try to get the header version
            if line.count('image export tool'):
                version = line.split()[-1]
                if version not in supported_versions:
                    warnings.warn(
                          "PAR/REC version '%s' is currently not "
                          "supported -- making an attempt to read "
                          "nevertheless. Please email the NiBabel "
                          "mailing list, if you are interested in "
                          "adding support for this version."
                          % version)
            else:
                # just a comment
                continue
        elif line.startswith('.'):
            # read 'general information' and store in a dict
            first_colon = line[1:].find(':') + 1
            key = line[1:first_colon].strip()
            value = line[first_colon + 1:].strip()
            # get props for this hdr field
            props = _hdr_key_dict[key]
            # turn values into meaningful dtype
            if len(props) == 2:
                # only dtype spec and no shape
                value = props[1](value)
            elif len(props) == 3:
                # array with dtype and shape
                value = np.fromstring(value, props[1], sep=' ')
                value.shape = props[2]
            general_info[props[0]] = value
        elif line:
            # anything else is an image definition: store for later
            # processing
            image_info.append(line)

    # postproc image def props
    # create an array for all image defs
    image_defs = np.zeros(len(image_info), dtype=image_def_dtype)

    # for every image definition
    for i, line in enumerate(image_info):
        items = line.split()
        item_counter = 0
        # for all image properties we know about
        for props in image_def_dtd:
            if np.issubdtype(image_defs[props[0]].dtype, str):
                # simple string
                image_defs[props[0]][i] = items[item_counter]
                item_counter += 1
            elif len(props) == 2:
                # prop with numerical dtype
                image_defs[props[0]][i] = props[1](items[item_counter])
                item_counter += 1
            elif len(props) == 3:
                # array prop with dtype
                nelements = np.prod(props[2])
                # get as many elements as necessary
                itms = items[item_counter:item_counter + nelements]
                # convert to array with dtype
                value = np.fromstring(" ".join(itms), props[1], sep=' ')
                # store
                image_defs[props[0]][i] = value
                item_counter += nelements

    return general_info, image_defs
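
# A minimal usage sketch for parse_PAR_header() (not executed here); the
# filename 'example.PAR' is hypothetical, the keys come from _hdr_key_dict
# and image_def_dtd above:
#
#     with open('example.PAR') as fobj:
#         general_info, image_defs = parse_PAR_header(fobj)
#     general_info['max_slices']       # one value per "general information" key
#     image_defs['slice number']       # one value per image definition line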


class PARRECHeader(Header):
    """PAR/REC header"""
    def __init__(self, info, image_defs):
        """
        Parameters
        ----------
        info : dict
          "General information" from the PAR file (as returned by
          `parse_PAR_header()`).
        image_defs : array
          Structured array with image definitions from the PAR file (as returned
          by `parse_PAR_header()`).
        """
        self.general_info = info
        self.image_defs = image_defs
        self._slice_orientation = None

        # charge with basic properties to be able to use base class
        # functionality
        # dtype
        dtype = np.typeDict[
                    'int'
                    + str(self._get_unique_image_prop('image pixel size')[0])]
        Header.__init__(self,
                        data_dtype=dtype,
                        shape=self.get_data_shape_in_file(),
                        zooms=self._get_zooms()
                       )


    @classmethod
    def from_header(klass, header=None):
        if header is None:
            raise PARRECError('Cannot create PARRECHeader from air.')
        if type(header) == klass:
            return header.copy()
        raise PARRECError('Cannot create PARREC header from non-PARREC header.')


    @classmethod
    def from_fileobj(klass, fileobj):
        info, image_defs = parse_PAR_header(fileobj)
        return klass(info, image_defs)


    def copy(self):
        return PARRECHeader(
                copy.deepcopy(self.general_info),
                self.image_defs.copy())


    def _get_unique_image_prop(self, name):
        """Scan all image definitions and return the unique value of a property.

        If the requested property is an array, this method does _not_ behave
        like `np.unique`. It returns the unique combination of array elements
        shared by all image definitions, _not_ the unique element values.

        Raises
        ------
        If there is more than a single unique value, a `PARRECError` is raised.
        """
        prop = self.image_defs[name]
        if len(prop.shape) > 1:
            # check uniqueness per array component, across all image definitions
            uprops = [np.unique(prop[:, i]) for i in range(prop.shape[1])]
        else:
            uprops = [np.unique(prop)]
        if not np.prod([len(uprop) for uprop in uprops]) == 1:
            raise PARRECError('Varying %s in image sequence (%s). This is not '
                              'supported.' % (name, uprops))
        else:
            return np.array([uprop[0] for uprop in uprops])


    def get_voxel_size(self):
        """Returns the spatial extent of a voxel.

        Returns
        -------
        array, shape (3,)
          In-plane voxel sizes and slice thickness (without the slice gap).
        """
        # slice thickness and in-plane spacing for the whole image series
        slice_thickness = self._get_unique_image_prop('slice thickness')[0]
        voxsize_inplane = self._get_unique_image_prop('pixel spacing')
        voxsize = np.array((voxsize_inplane[0],
                            voxsize_inplane[1],
                            slice_thickness))
        return voxsize


    def get_ndim(self):
        """Return the number of dimensions of the image data."""
        if self.general_info['max_dynamics'] > 1 \
           or self.general_info['max_gradient_orient'] > 1:
            return 4
        else:
            return 3


    def _get_zooms(self):
        """Compute image zooms from header data.

        The spatial axes are the first three.
        """
        # slice gap for the whole image series
        slice_gap = self._get_unique_image_prop('slice gap')[0]
        # scaling per image axis
        zooms = np.ones(self.get_ndim())
        # spatial axes correspond to voxelsize + inter slice gap
        # voxel size (inplaneX, inplaneY, slices)
        zooms[:3] = self.get_voxel_size()
        zooms[2] += slice_gap
        # time axis?
        if len(zooms) > 3  and self.general_info['max_dynamics'] > 1:
            # DTI also has 4D
            zooms[3] = self.general_info['repetition_time']
        return zooms


    def get_affine(self, origin='scanner'):
        """Compute affine transformation into scanner space.

        The method only considers global rotation and offset settings in the
        header and ignores potentially deviating information in the image
        definitions.

        Parameters
        ----------
        origin : {'scanner', 'fov'}
          Transformation origin. By default the transformation is computed
          relative to the scanner's iso center. If 'fov' is requested
          the transformation origin will be the center of the field of view
          instead.

        Returns
        -------
        array
          4x4 array, with axis order corresponding to (x,y,z) or (lr, pa, fh).
        """
        # hdr has deg, we need radian
        # order is [ap, fh, rl]
        ang_rad = self.general_info['angulation'] * np.pi / 180.0
        # need to rotate back from what was given in the file
        ang_rad *= -1

        # R2AGUI approach is this, but it comes with remarks ;-)
        # % trying to incorporate AP FH RL rotation angles: determined using some 
        # % common sense, Chris Rordon's help + source code and trial and error, 
        # % this is considered EXPERIMENTAL!
        rot_rl = np.mat(
                [[1.0, 0.0, 0.0],
                 [0.0, np.cos(ang_rad[2]), -np.sin(ang_rad[2])],
                 [0.0, np.sin(ang_rad[2]), np.cos(ang_rad[2])]]
                )
        rot_ap = np.mat(
                [[np.cos(ang_rad[0]), 0.0, np.sin(ang_rad[0])],
                 [0.0, 1.0, 0.0],
                 [-np.sin(ang_rad[0]), 0.0, np.cos(ang_rad[0])]]
                )
        rot_fh = np.mat(
                [[np.cos(ang_rad[1]), -np.sin(ang_rad[1]), 0.0],
                 [np.sin(ang_rad[1]), np.cos(ang_rad[1]), 0.0],
                 [0.0, 0.0, 1.0]]
                )
        rot_r2agui = rot_rl * rot_ap * rot_fh
        # NiBabel way of doing it
        # order is [ap, fh, rl]
        #           x   y   z
        #           0   1   2
        rot_nibabel = euler2mat(ang_rad[1], ang_rad[0], ang_rad[2])

        # XXX for now put some safety net, until we have recorded proper
        # test data with oblique orientations and different readout directions
        # to verify the order of arguments of euler2mat
        assert(np.all(rot_r2agui == rot_nibabel))
        rot = rot_nibabel

        # FOV (always in ap, fh, rl)
        fov = self.general_info['fov']
        # voxel size always (inplaneX, inplaneY, slicethickness (without gap))
        voxsize = self.get_voxel_size()

        slice_orientation = self.get_slice_orientation()
        if slice_orientation == 'sagital':
            # inplane: AP, FH   slices: RL
            recfg_data_axis = np.mat([[  0,  0,  1],
                                      [ -1,  0,  0],
                                      [  0, -1,  0]])
            # fov is already like the data
            fov = fov
        elif slice_orientation == 'transversal':
            # inplane: RL, AP   slices: FH
            recfg_data_axis = np.mat([[ -1,  0,  0],
                                      [  0, -1,  0],
                                      [  0,  0,  1]])
            # reorder fov from (ap, fh, rl) into data axis order (rl, ap, fh)
            fov = fov[[2,0,1]]
        elif slice_orientation == 'coronal':
            # inplane: RL, FH   slices: AP
            recfg_data_axis = np.mat([[ -1,  0,  0],
                                      [  0,  0, -1],
                                      [  0, -1,  0]])
            # reorder fov from (ap, fh, rl) into data axis order (rl, fh, ap)
            fov = fov[[2,1,0]]
        else:
            raise PARRECError("Unknown slice orientation (%s)."
                              % slice_orientation)

        rot = rot * recfg_data_axis

        # ijk origin should be: Anterior, Right, Foot
        # qform should point to the center of the voxel
        fov_center_offset = self.get_voxel_size() / 2 - fov / 2

        # need to rotate this offset into scanner space
        fov_center_offset = np.dot(rot, fov_center_offset)

        # get the scaling by voxelsize and slice thickness (incl. gap)
        scaled = rot * np.mat(np.diag(self.get_zooms()[:3]))

        # compose the affine
        aff = np.eye(4)
        aff[:3,:3] = scaled
        # offset
        aff[:3,3] = fov_center_offset
        if origin == 'fov':
            pass
        elif origin == 'scanner':
            # offset to scanner's iso center (always in ap, fh, rl)
            # -- turn into rl, ap, fh and then lr, pa, fh
            iso_offset = self.general_info['off_center'][[2,0,1]] * [-1,-1,1]
            aff[:3,3] += iso_offset
        return aff
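
    # How the affine is typically applied (a sketch, not executed here):
    # mapping voxel indices (i, j, k) into scanner space, with aff the 4x4
    # array returned above:
    #
    #     xyz = np.dot(aff, [i, j, k, 1])[:3]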


    def get_data_shape_in_file(self):
        """Return the shape of the binary blob in the REC file.

        Returns
        -------
        tuple
          (inplaneX, inplaneY, nslices, ndynamics/ndirections)
        """
        # e.g. number of volumes
        ndynamics = len(np.unique(self.image_defs['dynamic scan number']))
        # DTI volumes (b-values-1 x directions)
        # there is some awkward exception to this rule for b-values > 2
        # XXX need to get test image...
        ndtivolumes = (self.general_info['max_diffusion_values'] - 1) \
                        * self.general_info['max_gradient_orient']
        nslices = len(np.unique(self.image_defs['slice number']))
        if not nslices == self.general_info['max_slices']:
            raise PARRECError("Header inconsistency: Found %i slices, "
                              "but header claims to have %i."
                              % (nslices, self.general_info['max_slices']))

        inplane_shape = tuple(self._get_unique_image_prop('recon resolution'))

        # there should not be both: multiple dynamics and DTI
        if ndynamics > 1:
            return inplane_shape + (nslices, ndynamics)
        elif ndtivolumes > 1:
            return inplane_shape + (nslices, ndtivolumes)
        else:
            return tuple(inplane_shape) + (nslices,)
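
    # For example (hypothetical numbers): a 10-dynamic series reconstructed at
    # 64x64 with 30 slices would yield (64, 64, 30, 10); a single anatomical
    # volume would yield (64, 64, 30).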


    def get_data_scaling(self, method="dv"):
        """Returns scaling slope and intercept.

        Parameters
        ----------
        method : {'fp', 'dv'}
          Scaling settings to be reported -- see notes below.

        Notes
        -----
        The PAR header contains two different scaling settings: 'dv' (value on
        console) and 'fp' (floating point value). Here is how they are defined:

        PV: value in REC
        RS: rescale slope
        RI: rescale intercept
        SS: scale slope

        DV = PV * RS + RI
        FP = DV / (RS * SS)
        """
        # XXX: FP tends to become HUGE, DV seems to be more reasonable -> figure
        #      out which one means what

        # although there is a per-image scaling in the header, it looks like
        # there is just one unique factor and intercept per whole image series
        scale_slope = self._get_unique_image_prop('scale slope')
        rescale_slope = self._get_unique_image_prop('rescale slope')
        rescale_intercept = self._get_unique_image_prop('rescale intercept')

        if method == 'dv':
            slope = rescale_slope
            intercept = rescale_intercept
        elif method == 'fp':
            # actual slopes per definition above
            slope = 1.0 / scale_slope
            # actual intercept per definition above
            intercept = rescale_intercept / (rescale_slope * scale_slope)
        else:
            raise ValueError("Unknown scling method '%s'." % method)
        return (slope, intercept)
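
    # Worked example for the two conventions documented above (numbers are
    # made up): with PV=100, RS=2.0, RI=5.0 and SS=0.5 the header formulas
    # give DV = 100 * 2.0 + 5.0 = 205.0 and FP = 205.0 / (2.0 * 0.5) = 205.0;
    # method='fp' accordingly returns slope = 1 / SS = 2.0 and
    # intercept = RI / (RS * SS) = 5.0.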


    def get_slice_orientation(self):
        """Returns the slice orientation label.

        Returns
        -------
        {'transversal', 'sagital', 'coronal'}
        """
        if self._slice_orientation is None:
            self._slice_orientation = \
                slice_orientation_codes.label[
                    self._get_unique_image_prop('slice orientation')[0]]
        return self._slice_orientation


    def raw_data_from_fileobj(self, fileobj):
        """Returns memmap array of raw unscaled image data.

        Array axes correspond to x,y,z,t.
        """
        # memmap the data -- it is guaranteed to be uncompressed and all
        # properties are known
        # read in Fortran order to have spatial axes first
        data = np.memmap(fileobj,
                         dtype=self.get_data_dtype(),
                         mode='c', # copy-on-write
                         shape=self.get_data_shape_in_file(),
                         order='F')
        return data


    def data_from_fileobj(self, fileobj):
        """Returns scaled image data.

        Behaves identically to `PARRECHeader.raw_data_from_fileobj()`, but
        returns scaled image data. This causes the image data to be loaded into
        memory.
        """
        unscaled = self.raw_data_from_fileobj(fileobj)
        slope, intercept = self.get_data_scaling()
        scaled = unscaled * slope
        scaled += intercept
        return scaled



class PARRECImage(SpatialImage):
    """PAR/REC image"""
    header_class = PARRECHeader
    files_types = (('image', '.rec'), ('header', '.par'))

    class ImageArrayProxy(object):
        def __init__(self, rec_fobj, hdr):
            self._rec_fobj = rec_fobj
            self._hdr = hdr
            self._data = None

        def __array__(self):
            ''' Cached read of data from file '''
            if self._data is None:
                self._data = self._hdr.data_from_fileobj(self._rec_fobj)
            return self._data

        @property
        def shape(self):
            # embedded header knows it, without having to touch the data
            return self._hdr.get_data_shape()


    @classmethod
    def from_file_map(klass, file_map):
        hdr_fobj = file_map['header'].get_prepare_fileobj()
        rec_fobj = file_map['image'].get_prepare_fileobj()
        hdr = PARRECHeader.from_fileobj(hdr_fobj)
        data = klass.ImageArrayProxy(rec_fobj, hdr)
        return klass(data,
                     hdr.get_affine(),
                     header=hdr,
                     extra=None,
                     file_map=file_map)


load = PARRECImage.load
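

# A minimal loading sketch (not executed here), assuming a PAR/REC pair
# 'scan.par'/'scan.rec' and the SpatialImage interface of this nibabel
# release:
#
#     import nibabel.parrec as parrec
#     img = parrec.load('scan.par')
#     data = img.get_data()        # scaled data, read via data_from_fileobj()
#     affine = img.get_affine()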