/usr/share/pyshared/nitime/analysis/snr.py is in python-nitime 0.5-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

import numpy as np
from nitime.lazy import scipy_stats as stats

from nitime import descriptors as desc
from nitime import algorithms as tsa
from nitime import timeseries as ts

from nitime.index_utils import tril_indices_from


from .base import BaseAnalyzer


def signal_noise(response):
    """
    Signal and noise as defined in Borst and Theunissen 1999, Figure 2

    Parameters
    ----------

    response: nitime TimeSeries object
       The data here are individual responses of a single unit to the same
       stimulus, with repetitions being the first dimension and time as the
       last dimension
    """

    signal = np.mean(response.data, 0)  # The estimate of the signal is the
                                        # average response

    noise = response.data - signal      # Noise is the individual repetition's
                                        # deviation from the estimate of the
                                        # signal

    # Return TimeSeries objects with the sampling rate of the input:
    return  (ts.TimeSeries(signal, sampling_rate=response.sampling_rate),
             ts.TimeSeries(noise, sampling_rate=response.sampling_rate))
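
# Usage sketch (illustrative, not part of the original module): for a
# hypothetical TimeSeries ``trials`` with shape (n_repetitions, n_timepoints),
#
#     signal, noise = signal_noise(trials)
#
# ``signal.data`` holds the trial-averaged response, with shape
# (n_timepoints,), and ``noise.data`` holds each repetition's deviation from
# it, with the same shape as ``trials.data``.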


class SNRAnalyzer(BaseAnalyzer):
    """
    Calculate the SNR for a response to repetitions of the same stimulus,
    according to Borst and Theunissen (1999, Figure 2) and Hsu et al. (2004).

    Hsu A, Borst A and Theunissen FE (2004) Quantifying variability in neural
    responses and its application for the validation of model
    predictions. Network: Comput Neural Syst 15:91-109

    Borst A and Theunissen FE (1999) Information theory and neural coding. Nat
    Neurosci 2:947-957
    """
    def __init__(self, input=None, bandwidth=None, adaptive=False,
                 low_bias=False):
        """
        Initializer for the SNRAnalyzer object

        Parameters
        ----------
        input: TimeSeries object

        bandwidth: float,
           The bandwidth of the windowing function determines the number of
           tapers to use. This parameter represents a trade-off between
           frequency resolution (lower main lobe bandwidth for the taper) and
           variance reduction (higher bandwidth and number of averaged
           estimates). By default, it is set to 4 times the fundamental
           frequency, such that NW=4.

        adaptive: bool, defaults to False
            Whether to set the weights for the tapered spectra according to the
            adaptive algorithm (Thomson, 2007).

        low_bias : bool, defaults to False
            Rather than using 2NW tapers, only use the tapers that have better
            than 90% spectral concentration within the bandwidth (still using a
            maximum of 2NW tapers).

        Notes
        -----

        Thomson DJ (2007) Jackknifing multitaper spectrum estimates. IEEE
        Signal Processing Magazine 24:20-30

        """
        self.input = input
        self.signal, self.noise = signal_noise(input)
        self.bandwidth = bandwidth
        self.adaptive = adaptive
        self.low_bias = low_bias

    @desc.setattr_on_read
    def mt_frequencies(self):
        return np.linspace(0, self.input.sampling_rate / 2,
                           self.input.data.shape[-1] // 2 + 1)

    @desc.setattr_on_read
    def mt_signal_psd(self):
        _, p, _ = tsa.multi_taper_psd(self.signal.data,
                                    Fs=self.input.sampling_rate,
                                    BW=self.bandwidth,
                                    adaptive=self.adaptive,
                                    low_bias=self.low_bias)
        return p

    @desc.setattr_on_read
    def mt_noise_psd(self):
        p = np.empty((self.noise.data.shape[0],
                     self.noise.data.shape[-1] // 2 + 1))

        for i in range(p.shape[0]):
            _, p[i], _ = tsa.multi_taper_psd(self.noise.data[i],
                                    Fs=self.input.sampling_rate,
                                    BW=self.bandwidth,
                                    adaptive=self.adaptive,
                                    low_bias=self.low_bias)
        return np.mean(p, 0)

    @desc.setattr_on_read
    def mt_coherence(self):
        """ """
        return self.mt_signal_psd / (self.mt_signal_psd + self.mt_noise_psd)

    @desc.setattr_on_read
    def mt_information(self):
        df = self.mt_frequencies[1] - self.mt_frequencies[0]
        return -1 * np.log2(1 - self.mt_coherence) * df
        # These two formulations should be equivalent:
        # return np.log2(1 + self.mt_snr) * df
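        # (Since mt_coherence = S / (S + N) and mt_snr = S / N, it follows
        # that 1 - mt_coherence = N / (S + N), so 1 / (1 - mt_coherence) =
        # 1 + mt_snr, and hence -log2(1 - mt_coherence) = log2(1 + mt_snr).)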

    @desc.setattr_on_read
    def mt_snr(self):
        return self.mt_signal_psd / self.mt_noise_psd

    @desc.setattr_on_read
    def correlation(self):
        """
        The correlation between all combinations of trials

        Returns
        -------
        (r, e) : tuple
           r is the mean correlation and e is the standard error of the mean
           correlation (with df = n_trials - 1)
        """

        c = np.corrcoef(self.input.data)
        c = c[tril_indices_from(c, -1)]

        return np.mean(c), stats.sem(c)
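

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): a minimal,
# hypothetical demonstration of SNRAnalyzer on synthetic data -- 10 noisy
# repetitions of a 20 Hz sinusoid sampled at 1000 Hz. All variable names and
# parameter values below are illustrative assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    t = np.arange(1024) / 1000.0
    clean = np.sin(2 * np.pi * 20 * t)
    # Each of the 10 repetitions is the clean signal plus independent noise:
    trials = ts.TimeSeries(clean + np.random.randn(10, 1024),
                           sampling_rate=1000)

    snr = SNRAnalyzer(trials)
    print("Mean SNR across frequencies: %.3f" % np.mean(snr.mt_snr))
    print("Information estimate (bits/s): %.3f" % np.sum(snr.mt_information))
    print("Mean trial-to-trial correlation: %.3f" % snr.correlation[0])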