/usr/share/pyshared/brian/directcontrol.py is in python-brian 1.3.1-1build1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

# ----------------------------------------------------------------------------------
# Copyright ENS, INRIA, CNRS
# Contributors: Romain Brette (brette@di.ens.fr) and Dan Goodman (goodman@di.ens.fr)
# 
# Brian is a computer program whose purpose is to simulate models
# of biological neural networks.
# 
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software.  You can  use, 
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info". 
# 
# As a counterpart to the access to the source code and  rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty  and the software's author,  the holder of the
# economic rights,  and the successive licensors  have only  limited
# liability. 
# 
# In this respect, the user's attention is drawn to the risks associated
# with loading,  using,  modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean  that it is complicated to manipulate,  and  that  also
# therefore means  that it is reserved for developers  and  experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or 
# data to be ensured and,  more generally, to use and operate it in the 
# same conditions as regards security. 
# 
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# ----------------------------------------------------------------------------------
# 
"""Direct controlling mechanisms

NeuronGroups and callable objects which allow direct
control over the behaviour of neurons.
"""

__all__ = ['MultipleSpikeGeneratorGroup', 'SpikeGeneratorGroup', 'PulsePacket',
           'PoissonGroup', 'OfflinePoissonGroup', 'PoissonInput']

from neurongroup import *
from threshold import *
from stateupdater import *
from units import *
import random as pyrandom
from numpy import where, array, zeros, ones, inf, nonzero, tile, sum, isscalar,\
                  cumsum, hstack, bincount,  ceil, ndarray, ascontiguousarray
from copy import copy
from clock import guess_clock
from utils.approximatecomparisons import *
import warnings
from operator import itemgetter
from log import *
import numpy
from numpy.random import exponential, randint, binomial
from connections import Connection

class MultipleSpikeGeneratorGroup(NeuronGroup):
    """Emits spikes at given times
    
    **Initialised as:** ::
    
        MultipleSpikeGeneratorGroup(spiketimes[,clock[,period]])
    
    with arguments: 

    ``spiketimes``
        a list of spike time containers, one for each neuron in the group;
        note that elements of ``spiketimes`` can also be callable objects which
        return spike time containers if you want to be able to reinitialise (see below).
        At its simplest, ``spiketimes`` could be a list of lists, where ``spiketimes[0]`` contains
        the firing times for neuron 0, ``spiketimes[1]`` for neuron 1, etc. However, any iterable
        object can be passed, so ``spiketimes[0]`` could be a generator, for example. Each
        spike time container should be sorted in time. If the containers are numpy arrays, units
        will not be checked (times should be in seconds).
    ``clock``
        A clock, if omitted the default clock will be used.
    ``period``
        Optionally makes the spikes recur periodically with the given
        period. Note that iterator objects cannot be used as ``spiketimes``
        with a period, as they cannot be reinitialised.
    
    Note that if two or more spike times fall within the same ``dt``, spikes will stack up
    and come out one per dt until the stack is exhausted. A warning will be generated
    if this happens.

    Also note that if you pass a generator, then reinitialising the group will not have the
    expected effect because a generator object cannot be reinitialised. Instead, you should
    pass a callable object which returns a generator, this will be called each time the
    object is reinitialised by calling the ``reinit()`` method.
        
    **Sample usage:** ::
    
        spiketimes = [[1*msecond, 2*msecond]]
        P = MultipleSpikeGeneratorGroup(spiketimes)
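
    If you need ``reinit()`` to regenerate the spikes, you can pass callables
    that return a fresh iterable each time they are called (an illustrative
    sketch; the function name is arbitrary)::

        def neuron0_spikes():
            return iter([1*msecond, 2*msecond])
        P = MultipleSpikeGeneratorGroup([neuron0_spikes])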
    """
    def __init__(self, spiketimes, clock=None, period=None):
        """Pass spiketimes
        
        spiketimes is a list of lists, one list for each neuron
        in the group. Each sublist consists of the spike times.
        """
        clock = guess_clock(clock)
        thresh = MultipleSpikeGeneratorThreshold(spiketimes, period=period)
        NeuronGroup.__init__(self, len(spiketimes), model=LazyStateUpdater(), threshold=thresh, clock=clock)

    def reinit(self):
        super(MultipleSpikeGeneratorGroup, self).reinit()
        self._threshold.reinit()

    def __repr__(self):
        return "MultipleSpikeGeneratorGroup"


class MultipleSpikeGeneratorThreshold(Threshold):
    def __init__(self, spiketimes, period=None):
        self.set_spike_times(spiketimes, period=period)

    def reinit(self):
        # spiketimes is a container where each element is an iterable container of spike times for
        # each neuron. We store the iterator for each neuron, and the next spike time if it exists
        # or None if there are no spikes or no more spikes
        def makeiter(obj):
            if callable(obj): return iter(obj())
            return iter(obj)
        self.spiketimeiter = [makeiter(st) for st in self.spiketimes]
        self.nextspiketime = [None for st in self.spiketimes]
        for i in range(len(self.spiketimes)):
            try:
                self.nextspiketime[i] = self.spiketimeiter[i].next()
            except StopIteration:
                pass
        self.curperiod = -1

    def set_spike_times(self, spiketimes, period=None):
        self.spiketimes = spiketimes
        self.period = period
        self.reinit()

    def __call__(self, P):
        firing = zeros(len(self.spiketimes))
        t = P.clock.t
        if self.period is not None:
            cp = int(t / self.period)
            if cp > self.curperiod:
                self.reinit()
                self.curperiod = cp
            t = t - cp * self.period
        # `it` is the iterator for neuron i, and `nextspiketime` is the stored time of its next spike
        for it, nextspiketime, i in zip(self.spiketimeiter, self.nextspiketime, range(len(self.spiketimes))):
            # Note we use approximate equality testing because the clock t+=dt mechanism accumulates errors
            if isinstance(self.spiketimes[i], numpy.ndarray):
                curt = float(t)
            else:
                curt = t
            if nextspiketime is not None and is_approx_less_than_or_equal(nextspiketime, curt):
                firing[i] = 1
                try:
                    nextspiketime = it.next()
                    if is_approx_less_than_or_equal(nextspiketime, curt):
                        log_warn('brian.MultipleSpikeGeneratorThreshold', 'Stacking multiple firing times')
                except StopIteration:
                    nextspiketime = None
                self.nextspiketime[i] = nextspiketime
        return where(firing)[0]


class SpikeGeneratorGroup(NeuronGroup):
    """Emits spikes at given times
    
    Initialised as::
    
        SpikeGeneratorGroup(N,spiketimes[,clock[,period]])
    
    with arguments:
    
    ``N``
        The number of neurons in the group.
    ``spiketimes``
        An object specifying which neurons should fire and when. It can be a container
        such as a ``list`` containing tuples ``(i,t)``, meaning neuron ``i`` fires at
        time ``t``, or a callable object which returns such a container (which
        allows you to use generator objects, although this is slower, see below). ``i`` can
        be an integer or an array (a list of neurons that spike at the same time).
        If ``spiketimes`` is not a list or tuple, the pairs ``(i,t)`` need to be
        sorted in time. You can also pass a numpy array ``spiketimes`` where the
        first column of the array is the neuron indices and the second column is
        the times in seconds. Alternatively, you can pass a tuple of two arrays,
        the first being the neuron indices and the second the times.
        WARNING: units are not checked in this case; the time array should be in seconds.
    ``clock``
        An optional clock to update with (omit to use the default clock).
    ``period``
        Optionally makes the spikes recur periodically with the given
        period. Note that iterator objects cannot be used as ``spiketimes``
        with a period, as they cannot be reinitialised.
    ``gather=False``
        Set to True if you want to gather spike events that fall in the same
        timestep. (Deprecated since Brian 1.3.1)
    ``sort=True``
        Set to False if your spike events are already sorted.
    
    Has an attribute:
    
    ``spiketimes``
        This can be used to reset the list of spike times, however the values of
        ``N``, ``clock`` and ``period`` cannot be changed. 
        
    **Sample usages**
    
    The simplest usage would be a list of pairs ``(i,t)``::
    
        spiketimes = [(0,1*ms), (1,2*ms)]
        SpikeGeneratorGroup(N,spiketimes)
    
    A more complicated example would be to pass a generator::

        import random
        def nextspike():
            nexttime = random.uniform(0*ms,10*ms)
            while True:
                yield (random.randint(0,9),nexttime)
                nexttime = nexttime + random.uniform(0*ms,10*ms)
        P = SpikeGeneratorGroup(10,nextspike())
    
    This would give a neuron group ``P`` with 10 neurons, where a randomly chosen
    neuron fires on average once every 5 ms. Please note that as of 1.3.1 this
    behavior is still supported, but it will run slower than initializing with
    arrays or lists.
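
    For example, spikes can be specified directly with a pair of arrays (an
    illustrative sketch; when arrays are used, units are not checked and the
    times must be given in seconds)::

        from numpy import array
        idx   = array([0, 1, 5])
        times = array([0.001, 0.002, 0.002])   # seconds
        P = SpikeGeneratorGroup(10, (idx, times))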
    
    **Notes**
    
    Note that if a neuron fires more than one spike in a given interval ``dt``, additional
    spikes will be discarded. If you want them to stack, consider using the less efficient
    :class:`MultipleSpikeGeneratorGroup` object instead. A warning will be issued if this
    is detected.

    Also, if you want to use a SpikeGeneratorGroup with many spikes and/or neurons, it is
    best to initialize it with arrays.
    
    Also note that if you pass a generator, then reinitialising the group will not have the
    expected effect because a generator object cannot be reinitialised. Instead, you should
    pass a callable object which returns a generator. In the example above, that would be
    done by calling::
    
        P = SpikeGeneratorGroup(10,nextspike)
        
    Whenever P is reinitialised, it will call ``nextspike()`` to create the required spike
    container.
    """
    def __init__(self, N, spiketimes, clock=None, period=None, 
                 sort=True, gather=None):
        clock = guess_clock(clock)
        self.N = N
        self.period = period
        if gather:
            log_warn('brian.SpikeGeneratorGroup', 'SpikeGeneratorGroup\'s gather keyword use is deprecated')
        fallback = False # fall back on old SpikeGeneratorThreshold or not
        if isinstance(spiketimes, list):
            # spiketimes is a list of (i,t)
            if len(spiketimes):
                idx, times = zip(*spiketimes)
            else:
                idx, times = [], []
            # the following try ... handles the case where spiketimes has index arrays
            # e.g. spiketimes = [([0, 1], 0 * msecond), ([0, 1, 2], 2 * msecond)]
            # Notes:
            # - if there is always the same number of indices per array, it's simple: just a matter of flattening
            # - if not, then a for loop is required, and that is done in the except clause
            try:
                idx = array(idx, dtype = float)
                times = array(times, dtype = float)
                if idx.ndim > 1:
                    # simple case
                    times = tile(times.reshape((len(times), 1)), (idx.shape[1], 1)).flatten()
                    idx = idx.flatten()
            except ValueError:
                new_idx = []
                new_times = []
                for k, item in enumerate(idx):
                    if isinstance(item, list):
                        new_idx += item # append indices
                        new_times += [times[k]]*len(item)
                    else:
                        new_times += [times[k]]
                        new_idx += [item]
                idx = array(new_idx, dtype  = float)
                times = new_times
                times = array(times, dtype = float)
        elif isinstance(spiketimes, tuple):
            # spike times is a tuple with idx, times in arrays
            idx = spiketimes[0]
            times = spiketimes[1]
        elif isinstance(spiketimes, ndarray):
            # spiketimes is an ndarray: the first column is the neuron index and the second the time
            idx = spiketimes[:,0]
            times = spiketimes[:,1]
        else:
            log_warn('brian.SpikeGeneratorGroup', 'Using (slow) threshold because spiketimes is assumed to be a generator/iterator')
            # spiketimes is a callable object, so falling back on old SpikeGeneratorThreshold
            fallback = True

        if not fallback:
            thresh = FastSpikeGeneratorThreshold(N, idx, times, dt=clock.dt, period=period)
        else:
            thresh = SpikeGeneratorThreshold(N, spiketimes, period=period, sort=sort)
        
        if not hasattr(self, '_initialized'):
            NeuronGroup.__init__(self, N, model=LazyStateUpdater(), threshold=thresh, clock=clock)
            self._initialized = True
        else:
            self._threshold = thresh
 
    def reinit(self):
        super(SpikeGeneratorGroup, self).reinit()
        self._threshold.reinit()

    @property
    def spiketimes(self):
        return self._threshold.spiketimes
    
    @spiketimes.setter
    def spiketimes(self, values):
        self.__init__(self.N, values, period = self.period)

class FastSpikeGeneratorThreshold(Threshold):
    '''
    A faster version of the SpikeGeneratorThreshold where spikes are processed prior to the run (offline). It replaces the SpikeGeneratorThreshold as of 1.3.1.
    '''
    ## Notes:
    #  - N is ignored (should it not?)
    def __init__(self, N, addr, timestamps, dt = None, period=None):
        self.set_offsets(addr, timestamps, dt = dt)
        self.period = period
        self.dt = dt
        self.reinit()
        
    def set_offsets(self, I, T, dt = 1000):
        # Convert times into integers
        T = array(ceil(T/dt), dtype=int)
        # Put them into order
        # We use a field array to sort first by time and then by neuron index
        spikes = zeros(len(I), dtype=[('t', int), ('i', int)])
        spikes['t'] = T
        spikes['i'] = I
        spikes.sort(order=('t', 'i'))
        T = ascontiguousarray(spikes['t'])
        self.I = ascontiguousarray(spikes['i'])
        # Now for each timestep, we find the corresponding segment of I with
        # the spike indices for that timestep.
        # The idea of offsets is that the segment offsets[t]:offsets[t+1]
        # should give the spikes with time t, i.e. T[offsets[t]:offsets[t+1]]
        # should all be equal to t, and so then later we can return
        # I[offsets[t]:offsets[t+1]] at time t. It might take a bit of thinking
        # to see why this works. Since T is sorted, and bincount(T)[i] returns the
        # number of elements of T equal to i, j=cumsum(bincount(T))[t] counts the
        # elements of T that are <= t, so offsets[t]=cumsum(bincount(T))[t-1] is the
        # first index j in T with T[j] >= t.
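        # A small worked example (illustrative): if T = [0, 0, 2] then
        # bincount(T) = [2, 0, 1], cumsum(...) = [2, 2, 3] and offsets = [0, 2, 2, 3],
        # so I[offsets[0]:offsets[1]] gives the two spikes at timestep 0,
        # I[offsets[1]:offsets[2]] is empty, and I[offsets[2]:offsets[3]] gives
        # the spike at timestep 2.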
        if len(T):
            self.offsets = hstack((0, cumsum(bincount(T))))
        else:
            self.offsets = array([])
    
    def __call__(self, P):
        t = P.clock.t
        if self.period is not None:
            cp = int(t / self.period)
            if cp > self.curperiod:
                self.reinit()
                self.curperiod = cp
            t = t - cp * self.period
        dt = P.clock.dt
        t = int(round(t/dt))
        if t+1>=len(self.offsets):
            return array([], dtype=int)
        return self.I[self.offsets[t]:self.offsets[t+1]]
    
    def reinit(self):
        self.curperiod = -1
        
    @property
    def spiketimes(self):
        # this is a pain to do! retrieve spike times from offsets
        res = []
        for k in range(len(self.offsets)-1):
            idx = self.I[self.offsets[k]:self.offsets[k+1]]
            ts = [k*self.dt]*len(idx)
            res += zip(idx, ts)
        return res

class SpikeGeneratorThreshold(Threshold):
    """
    Old threshold object for the SpikeGeneratorGroup
    
    **Notes**

    This version of the SpikeGeneratorThreshold object is deprecated; since version 1.3.1 of Brian it has been replaced in most cases by the FastSpikeGeneratorThreshold.
    It is kept only as a fallback object for when a SpikeGeneratorGroup object is initialized with a generator or an iterator object (see the documentation for SpikeGeneratorGroup for more details). Please note that since this implementation is slower, using a static data structure as input to a SpikeGeneratorGroup is advised.
    """
    def __init__(self, N, spiketimes, period=None, sort=True):
        self.set_spike_times(N, spiketimes, period=period, sort=sort)

    def reinit(self):
        def makeiter(obj):
            if callable(obj): return iter(obj())
            return iter(obj)
        self.spiketimeiter = makeiter(self.spiketimes)
        try:
            self.nextspikenumber, self.nextspiketime = self.spiketimeiter.next()
        except StopIteration:
            self.nextspiketime = None
            self.nextspikenumber = 0
        self.curperiod = -1

    def set_spike_times(self, N, spiketimes, period=None, sort=True):
        # N is the number of neurons, spiketimes is an iterable object of tuples (i,t) where
        # t is the spike time, and i is the neuron number. If spiketimes is a list or tuple,
        # then it will be sorted here.
        if isinstance(spiketimes, (list, tuple)) and sort:
            spiketimes = sorted(spiketimes, key=itemgetter(1))
        self.spiketimes = spiketimes
        self.N = N
        self.period = period
        self.reinit()
        
    def __call__(self, P):
        firing = zeros(self.N)
        t = P.clock.t
        if self.period is not None:
            cp = int(t / self.period)
            if cp > self.curperiod:
                self.reinit()
                self.curperiod = cp
            t = t - cp * self.period
        if isinstance(self.spiketimes, numpy.ndarray):
            t = float(t)
        while self.nextspiketime is not None and is_approx_less_than_or_equal(self.nextspiketime, t):
            if type(self.nextspikenumber)==int and firing[self.nextspikenumber]:
                log_warn('brian.SpikeGeneratorThreshold', 'Discarding multiple overlapping spikes')
            firing[self.nextspikenumber] = 1
            try:
                self.nextspikenumber, self.nextspiketime = self.spiketimeiter.next()
            except StopIteration:
                self.nextspiketime = None
        return where(firing)[0]

# The output of this function is fed into SpikeGeneratorGroup, consisting of
# time sorted pairs (i,t) where t is when neuron i fires
@check_units(t=second, n=1, sigma=second)
def PulsePacketGenerator(t, n, sigma):
    times = [pyrandom.gauss(t, sigma) for i in range(n)]
    times.sort()
    neuron = range(n)
    pyrandom.shuffle(neuron)
    return zip(neuron, times)


class PulsePacket(SpikeGeneratorGroup):
    """
    Fires a Gaussian distributed packet of n spikes with given spread
    
    **Initialised as:** :: 
    
        PulsePacket(t,n,sigma[,clock])
        
    with arguments:
    
    ``t``
        The mean firing time
    ``n``
        The number of spikes in the packet
    ``sigma``
        The standard deviation of the firing times.
    ``clock``
        The clock to use (omit to use default or local clock)
    
    **Methods**
    
    This class is derived from :class:`SpikeGeneratorGroup` and has all its
    methods as well as one additional method:
    
    .. method:: generate(t,n,sigma)
    
        Change the parameters and/or generate a new pulse packet.
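
    **Sample usage** (an illustrative sketch; the values are arbitrary)::

        P = PulsePacket(10*ms, 100, 2*ms)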
    """
    @check_units(t=second, n=1, sigma=second)
    def __init__(self, t, n, sigma, clock=None):
        self.clock = guess_clock(clock)
        self.generate(t, n, sigma)

    def reinit(self):
        super(PulsePacket, self).reinit()
        self._threshold.reinit()
    @check_units(t=second, n=1, sigma=second)
    def generate(self, t, n, sigma):
        SpikeGeneratorGroup.__init__(self, n, PulsePacketGenerator(t, n, sigma), self.clock)

    def __repr__(self):
        return "Pulse packet neuron group"

class PoissonGroup(NeuronGroup):
    '''
    A group that generates independent Poisson spike trains.
    
    **Initialised as:** ::
    
        PoissonGroup(N,rates[,clock])
    
    with arguments:
    
    ``N``
        The number of neurons in the group
    ``rates``
        A scalar, array or function returning a scalar or array.
        The array should have the same length as the number of
        neurons in the group. The function should take one
        argument ``t`` the current simulation time.
    ``clock``
        The clock which the group will update with; do not
        specify it to use the default clock.
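
    **Sample usage** (an illustrative sketch; one constant-rate group and one
    with a time-dependent rate)::

        P = PoissonGroup(100, rates=10*Hz)
        Q = PoissonGroup(100, rates=lambda t: 20*Hz if t < 100*ms else 0*Hz)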
    '''
    def __init__(self, N, rates=0 * hertz, clock=None):
        '''
        Initializes the group.
        P.rates gives the rates.
        '''
        NeuronGroup.__init__(self, N, model=LazyStateUpdater(), threshold=PoissonThreshold(),
                             clock=clock)
        if callable(rates): # a function is passed
            self._variable_rate = True
            self.rates = rates
            self._S0[0] = self.rates(self.clock.t)
        else:
            self._variable_rate = False
            self._S[0, :] = rates
            self._S0[0] = rates
        self.var_index = {'rate':0}

    def update(self):
        if self._variable_rate:
            self._S[0, :] = self.rates(self.clock.t)
        NeuronGroup.update(self)


class OfflinePoissonGroup(object): # This is weird, there is only an init method
    def __init__(self, N, rates, T):
        """
        Generates a Poisson group with N spike trains and given rates over the
        time window [0,T].
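
        The resulting ``spiketimes`` attribute is a list of ``(i, t)`` pairs that
        can be passed to a :class:`SpikeGeneratorGroup` (an illustrative sketch;
        rates and times are given here as plain numbers, i.e. Hz and seconds)::

            g = OfflinePoissonGroup(10, 20, 1.0)  # 10 trains at 20 Hz for 1 s
            P = SpikeGeneratorGroup(10, g.spiketimes)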
        """
        if isscalar(rates):
            rates = rates * ones(N)
        totalrate = sum(rates)
        isi = exponential(1 / totalrate, T * totalrate * 2)
        spikes = cumsum(isi)
        spikes = spikes[spikes <= T]
        neurons = randint(0, N, len(spikes))
        self.spiketimes = zip(neurons, spikes)


# Used in PoissonInput below
class EmptyGroup(object):
    def __init__(self, clock):
        self.clock = clock
    def get_spikes(self, delay):
        return None


class PoissonInput(Connection):
    """
    Adds a Poisson input to a NeuronGroup. This allows a large number of independent
    Poisson inputs to a NeuronGroup variable to be simulated efficiently, without
    simulating every synapse individually. The synaptic events are generated randomly
    during the simulation and are not preloaded and stored in memory (unless
    record=True is used).
    All the inputs must target the same variable and have the same frequency and
    synaptic weight. You can use as many PoissonInput objects as you want, even
    targeting the same NeuronGroup. Time jitter in the presynaptic spikes and
    synaptic unreliability can also be modelled, and the inputs can be recorded if
    needed. Finally, all neurons of the NeuronGroup receive independent realizations
    of Poisson spike trains, unless the keyword freeze=True is used, in which case
    all neurons receive the same Poisson input.
    
    **Initialised as:** ::
    
        PoissonInput(target[, N][, rate][, weight][, state][, jitter][, reliability][, copies][, record][, freeze])
    
    with arguments:
    
    ``target``
        The target :class:`NeuronGroup`
    ``N``
        The number of independent Poisson inputs
    ``rate``
        The rate of each Poisson process
    ``weight``
        The synaptic weight
    ``state``
        The name or the index of the synaptic variable of the :class:`NeuronGroup`      
    ``jitter``
        is ``None`` by default. If set, ``copies`` presynaptic spikes are considered
        at each Poisson event, each randomly shifted according to an exponential law
        with parameter ``jitter=taujitter`` (in seconds).
    ``reliability``
        is ``None`` by default. If set, ``copies`` presynaptic spikes are considered
        at each Poisson event, where each of these spikes is unreliable, i.e. it occurs
        only with probability ``reliability=alpha`` (between 0 and 1).
    ``copies``
        The number of copies of each Poisson event. This is identical to ``weight=copies*w``,
        unless ``jitter`` or ``reliability`` is specified.
    ``record``
        ``True`` if the input has to be recorded. In this case, the recorded events are
        stored in the ``recorded_events`` attribute, as a list of pairs ``(i,t)`` where ``i`` is the
        neuron index and ``t`` is the event time.
    ``freeze``
        ``True`` if the input must be the same for all neurons of the :class:`NeuronGroup`
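
    **Sample usage** (an illustrative sketch; ``group`` is an existing
    :class:`NeuronGroup`, and the state variable name and weight units depend
    on your model)::

        input = PoissonInput(group, N=1000, rate=10*Hz, weight=0.1*mV, state='v')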
    """    
    _record = []
    
    def __init__(self, target, N=None, rate=None, weight=None, state=None,
                  jitter=None, reliability=None, copies=1,
                  record=False, freeze=False):
        self.source = EmptyGroup(target.clock)
        self.target = target
        self.N = len(self.target)
        self.clock = target.clock
        self.delay = None
        self.iscompressed = True
        self.delays = None # delay to wait for the j-th synchronous spike to occur after the last sync event, for target neuron i
        self.lastevent = -inf * ones(self.N) # time of the last event for target neuron i
        self.events = []
        self.recorded_events = []

        self.n = N
        self.rate = rate
        self.w = weight
        self.var = state
        self._jitter = jitter
        
        if jitter is not None:
            self.delays = zeros((copies, self.N))
        
        self.reliability = reliability
        self.copies = copies
        self.record = record
        self.frozen = freeze
        
        if (jitter is not None) and (reliability is not None):
            raise Exception("Specifying both jitter and reliability is currently not supported.")

        if isinstance(state, str): # named state variable
            self.index = self.target.get_var_index(state)
        else:
            self.index = state

    @property
    def jitter(self):
        return self._jitter

    @jitter.setter
    def jitter(self, value):
        self._jitter = value
        if value is not None:
            self.delays = zeros((self.copies, self.N))

    def propagate(self, spikes):
        i = 0
        
        n = self.n
        f = self.rate
        w = self.w
        var = self.var
        jitter = self.jitter
        reliability = self.reliability
        record = self.record
        frozen = self.frozen
        state = self.index
        
        if (jitter is None) and (reliability is None):
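            # The number of presynaptic spikes arriving in one timestep is
            # approximated by a Binomial(n, rate*dt) draw: a single shared draw
            # if frozen, otherwise one independent draw per target neuron.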
            if frozen:
                rnd = binomial(n=n, p=f * self.clock.dt)
                self.target._S[state, :] += w * rnd
                if rnd > 0:
                    self.events.append(self.clock.t)
            else:
                rnd = binomial(n=n, p=f * self.clock.dt, size=(self.N))
                self.target._S[state, :] += w * rnd
                ind = nonzero(rnd>0)[0]
                if record and len(ind)>0:
                    self.recorded_events.append((ind[0], self.clock.t))
        elif (jitter is not None):
            p = self.copies
            taujitter = jitter
            if (p > 0) & (f > 0):
                k = binomial(n=n, p=f * self.clock.dt, size=(self.N)) # number of synchronous events here, for every target neuron
                syncneurons = (k > 0) # neurons with a synchronous event here
                self.lastevent[syncneurons] = self.clock.t
                if taujitter == 0.0:
                    self.delays[:, syncneurons] = zeros((p, sum(syncneurons)))
                else:
                    self.delays[:, syncneurons] = exponential(scale=taujitter, size=(p, sum(syncneurons)))
                # Delayed spikes occur now
                lastevent = tile(self.lastevent, (p, 1))
                b = (abs(self.clock.t - (lastevent + self.delays)) <= (self.clock.dt / 2) * ones((p, self.N))) # delayed spikes occurring now
                weff = sum(b, axis=0) * w
                self.target._S[state, :] += weff
        elif (reliability is not None):
            p = self.copies
            alpha = reliability
            if (p > 0) & (alpha > 0):
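                # Each of the p copies is delivered independently with probability
                # alpha, so the effective weight is w times a single Binomial(p, alpha)
                # draw (the same draw is shared by all target neurons at this timestep).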
                weff = w * binomial(n=p, p=alpha)
                self.target._S[state, :] += weff * binomial(n=n, p=f * self.clock.dt, size=(self.N))

def _test():
    import doctest
    doctest.testmod()

if __name__ == "__main__":
    _test()