/usr/share/pyshared/dipy/io/dpy.py is in python-dipy 0.7.1-2.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
''' A class for handling large tractography datasets.

    It is built using the pytables tools which in turn implement
    key features of the HDF5 (hierarchical data format) API [1]_.

    References
    ----------
    .. [1] http://www.hdfgroup.org/HDF5/doc/H5.intro.html
'''
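
# On-disk layout (as built in write mode below): a 'streamlines' group holds
# two extendable arrays -- 'tracks', an (N, 3) float32 EArray with all track
# points concatenated, and 'offsets', an int64 EArray whose consecutive
# entries mark where each track starts and ends inside 'tracks'.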

import numpy as np

# Conditional import machinery for pytables
from ..utils.optpkg import optional_package

# Allow import, but disable doctests, if we don't have pytables
tables, have_tables, setup_module = optional_package('tables')

# Make sure not to carry across setup module from * import
__all__ = ['Dpy']


class Dpy(object):
    def __init__(self, fname, mode='r', compression=0):
        ''' Advanced storage system for tractography based on HDF5

        Parameters
        ----------
        fname : str
            full filename
        mode : str
            'r' read, 'w' write, 'r+' read and write (only if the file
            already exists), 'a' read and write (even if the file doesn't
            exist; not used yet)
        compression : int
            0 (no compression) up to 9 (maximum compression)

        Examples
        --------
        >>> import os
        >>> import numpy as np
        >>> from tempfile import mkstemp  # temp file
        >>> from dipy.io.dpy import Dpy
        >>> fd, fname = mkstemp()
        >>> fname = fname + '.dpy'  # add correct extension
        >>> dpw = Dpy(fname, 'w')
        >>> A = np.ones((5, 3))
        >>> B = 2 * A.copy()
        >>> C = 3 * A.copy()
        >>> dpw.write_track(A)
        >>> dpw.write_track(B)
        >>> dpw.write_track(C)
        >>> dpw.close()
        >>> dpr = Dpy(fname, 'r')
        >>> A = dpr.read_track()
        >>> B = dpr.read_track()
        >>> T = dpr.read_tracksi([0, 1, 2, 0, 0, 2])
        >>> dpr.close()
        >>> os.remove(fname)  # delete file from disk
        '''
        self.mode = mode
        self.f = tables.openFile(fname, mode=self.mode)
        self.N = 5 * 10 ** 9
        self.compression = compression
        if self.mode == 'w':
            self.streamlines = self.f.createGroup(self.f.root, 'streamlines')
            # create a version number
            self.version = self.f.createArray(self.f.root, 'version',
                                              ['0.0.1'], 'Dpy Version Number')
            self.tracks = self.f.createEArray(self.f.root.streamlines,
                                              'tracks', tables.Float32Atom(),
                                              (0, 3), "scalar Float32 earray",
                                              tables.Filters(self.compression),
                                              expectedrows=self.N)
            self.offsets = self.f.createEArray(self.f.root.streamlines,
                                               'offsets', tables.Int64Atom(),
                                               (0,), "scalar Int64 earray",
                                               tables.Filters(self.compression),
                                               expectedrows=self.N + 1)
            self.curr_pos = 0
            self.offsets.append(np.array([self.curr_pos]).astype(np.int64))
        if self.mode == 'r':
            self.tracks = self.f.root.streamlines.tracks
            self.offsets = self.f.root.streamlines.offsets
            self.track_no = len(self.offsets) - 1
            self.offs_pos = 0
    def version(self):
        ver = self.f.root.version[:]
        return ver[0]
    def write_track(self, track):
        ''' write one track at a time
        '''
        self.tracks.append(track.astype(np.float32))
        self.curr_pos += track.shape[0]
        self.offsets.append(np.array([self.curr_pos]).astype(np.int64))
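
    # Offset bookkeeping example: after writing tracks with 5 and 3 points,
    # 'offsets' holds [0, 5, 8]; track i is tracks[offsets[i]:offsets[i + 1]],
    # which is exactly how read_track and read_tracksi slice below.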
    def write_tracks(self, T):
        ''' write many tracks together
        '''
        for track in T:
            self.tracks.append(track.astype(np.float32))
            self.curr_pos += track.shape[0]
            self.offsets.append(np.array([self.curr_pos]).astype(np.int64))
    def read_track(self):
        ''' read one track at a time
        '''
        off0, off1 = self.offsets[self.offs_pos:self.offs_pos + 2]
        self.offs_pos += 1
        return self.tracks[off0:off1]
    def read_tracksi(self, indices):
        ''' read tracks with specific indices
        '''
        T = []
        for i in indices:
            # print(self.offsets[i:i + 2])
            off0, off1 = self.offsets[i:i + 2]
            T.append(self.tracks[off0:off1])
        return T
    def read_tracks(self):
        ''' read the entire tractography
        '''
        I = self.offsets[:]
        TR = self.tracks[:]
        T = []
        for i in range(len(I) - 1):
            off0, off1 = I[i:i + 2]
            T.append(TR[off0:off1])
        return T
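
    # Note: read_tracks pulls the full 'offsets' and 'tracks' arrays into
    # memory in one shot; for selective access on large files, read_track
    # and read_tracksi slice the on-disk EArrays instead.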
    def close(self):
        self.f.close()

if __name__ == '__main__':
    pass
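For a quick sanity check outside the doctest above, a minimal round trip with this class looks like the sketch below. It is illustrative only: it assumes a PyTables version that still provides tables.openFile (as used by this module) and a writable temporary directory; the names first, some and everything are hypothetical.

    import os
    from tempfile import mkstemp

    import numpy as np
    from dipy.io.dpy import Dpy

    # Create a temporary .dpy file (the extension is only a convention).
    fd, fname = mkstemp(suffix='.dpy')

    # Write three small tracks, one call per track.
    dpw = Dpy(fname, 'w')
    for scale in (1.0, 2.0, 3.0):
        dpw.write_track(scale * np.ones((5, 3), dtype=np.float32))
    dpw.close()

    # Read them back: sequentially, by index, or all at once.
    dpr = Dpy(fname, 'r')
    first = dpr.read_track()         # track 0
    some = dpr.read_tracksi([0, 2])  # arbitrary indices
    everything = dpr.read_tracks()   # whole tractography
    dpr.close()

    os.close(fd)
    os.remove(fname)  # delete file from disk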