Specific Projections#

ANNarchy provides a set of predefined Projection objects to ease the definition of standard networks.

ANNarchy.core.SpecificProjection.CurrentInjection #

Bases: SpecificProjection

Inject current from a rate-coded population into a spiking population.

The pre-synaptic population must be rate-coded and the post-synaptic one must be spiking; both must have the same size, and no plasticity is allowed.

For each post-synaptic neuron, the current g_target will be set at each time step to the firing rate r of the pre-synaptic neuron with the same rank.

The projection must be connected with connect_current(), which takes no parameters and does not accept delays. It is equivalent to connect_one_to_one(weights=1).

Example:

```python
inp = Population(100, Neuron(equations="r = sin(t)"))

pop = Population(100, Izhikevich)

proj = CurrentInjection(inp, pop, 'exc')
proj.connect_current()
```
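
A minimal continuation of this example, assuming the usual ANNarchy workflow (compile(), simulate() and Monitor are standard ANNarchy API, but the recorded variable and the 100 ms duration are illustrative choices, not part of the original snippet):

```python
# Sketch of a possible continuation: compile the network, record the
# membrane potential of the spiking population and run the simulation.
compile()

m = Monitor(pop, 'v')   # 'v' is the membrane potential of the Izhikevich neurons
simulate(100.0)         # duration in ms (arbitrary choice for this sketch)

v = m.get('v')          # array of shape (timesteps, 100), driven by the injected current
```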
Source code in ANNarchy/core/SpecificProjection.py:

````python
class CurrentInjection(SpecificProjection):
    """
    Inject current from a rate-coded population into a spiking population.

    The pre-synaptic population must be rate-coded and the post-synaptic one must be spiking; both must have the same size, and no plasticity is allowed.

    For each post-synaptic neuron, the current ``g_target`` will be set at each time step to the firing rate ``r`` of the pre-synaptic neuron with the same rank.

    The projection must be connected with ``connect_current()``, which takes no parameters and does not accept delays. It is equivalent to ``connect_one_to_one(weights=1)``.

    Example:

    ```python
    inp = Population(100, Neuron(equations="r = sin(t)"))

    pop = Population(100, Izhikevich)

    proj = CurrentInjection(inp, pop, 'exc')
    proj.connect_current()
    ```

    """
    def __init__(self, pre, post, target, name=None, copied=False):
        """
        :param pre: pre-synaptic population.
        :param post: post-synaptic population.
        :param target: type of the connection.
        """
        # Instantiate the projection
        SpecificProjection.__init__(self, pre, post, target, None, name, copied)

        # Check populations
        if not self.pre.neuron_type.type == 'rate':
            Global._error('The pre-synaptic population of a CurrentInjection must be rate-coded.')

        if not self.post.neuron_type.type == 'spike':
            Global._error('The post-synaptic population of a CurrentInjection must be spiking.')

        if not self.post.size == self.pre.size:
            Global._error('CurrentInjection: The pre- and post-synaptic populations must have the same size.')

        if Global._check_paradigm("cuda") and (isinstance(pre, PopulationView) or isinstance(post, PopulationView)):
            Global._error("CurrentInjection on GPUs is not allowed for PopulationViews")

        # Prevent automatic split of matrices
        self._no_split_matrix = True

    def _copy(self, pre, post):
        "Returns a copy of the population when creating networks. Internal use only."
        return CurrentInjection(pre=pre, post=post, target=self.target, name=self.name, copied=True)

    def _generate_st(self):
        # Generate the code
        self._specific_template['psp_code'] = """
        if (pop%(id_post)s._active) {
            for (int i=0; i<post_rank.size(); i++) {
                pop%(id_post)s.g_%(target)s[post_rank[i]] += pop%(id_pre)s.r[pre_rank[i][0]];
            }
        } // active
""" % { 'id_pre': self.pre.id, 'id_post': self.post.id, 'target': self.target}

    def _generate_omp(self):
        # Generate the code
        self._specific_template['psp_code'] = """
        if (pop%(id_post)s._active) {
            #pragma omp for
            for (int i=0; i<post_rank.size(); i++) {
                pop%(id_post)s.g_%(target)s[post_rank[i]] += pop%(id_pre)s.r[pre_rank[i][0]];
            }
        } // active
""" % { 'id_pre': self.pre.id, 'id_post': self.post.id, 'target': self.target}

    def _generate_cuda(self):
        """
        Generate the CUDA code.

        For a first implementation we take a rather simple approach:

        * We use only one block for the kernel and each thread computes
        one synapse/post-neuron entry (which is equal as we use one2one).

        * We use only the pre-synaptic firing rate, no other variables.

        * We ignore the synaptic weight.
        """
        ids = {
            'id_proj': self.id,
            'id_post': self.post.id,
            'id_pre': self.pre.id,
            'target': self.target,
            'float_prec': Global.config['precision']
        }

        self._specific_template['psp_body'] = """
__global__ void cu_proj%(id_proj)s_psp(int post_size, %(float_prec)s *pre_r, %(float_prec)s *g_%(target)s) {
    int n = threadIdx.x;

    while (n < post_size) {
        g_%(target)s[n] += pre_r[n];

        n += blockDim.x;
    }
}
""" % ids
        self._specific_template['psp_header'] = """__global__ void cu_proj%(id_proj)s_psp(int post_size, %(float_prec)s *pre_r, %(float_prec)s *g_%(target)s);""" % ids
        self._specific_template['psp_call'] = """
    cu_proj%(id_proj)s_psp<<< 1, 192 >>>(
        pop%(id_post)s.size,
        pop%(id_pre)s.gpu_r,
        pop%(id_post)s.gpu_g_%(target)s );
""" % ids

    def connect_current(self):
        return self.connect_one_to_one(weights=1.0)
````

__init__(pre, post, target, name=None, copied=False) #

Parameters:

  • pre

    pre-synaptic population.

  • post

    post-synaptic population.

  • target

    type of the connection.

Source code in ANNarchy/core/SpecificProjection.py:

```python
def __init__(self, pre, post, target, name=None, copied=False):
    """
    :param pre: pre-synaptic population.
    :param post: post-synaptic population.
    :param target: type of the connection.
    """
    # Instantiate the projection
    SpecificProjection.__init__(self, pre, post, target, None, name, copied)

    # Check populations
    if not self.pre.neuron_type.type == 'rate':
        Global._error('The pre-synaptic population of a CurrentInjection must be rate-coded.')

    if not self.post.neuron_type.type == 'spike':
        Global._error('The post-synaptic population of a CurrentInjection must be spiking.')

    if not self.post.size == self.pre.size:
        Global._error('CurrentInjection: The pre- and post-synaptic populations must have the same size.')

    if Global._check_paradigm("cuda") and (isinstance(pre, PopulationView) or isinstance(post, PopulationView)):
        Global._error("CurrentInjection on GPUs is not allowed for PopulationViews")

    # Prevent automatic split of matrices
    self._no_split_matrix = True
```

ANNarchy.core.SpecificProjection.DecodingProjection #

Bases: SpecificProjection

Decoding projection to transform spike trains into firing rates.

The pre-synaptic population must be a spiking population, while the post-synaptic one must be rate-coded.

Pre-synaptic spikes are accumulated for each post-synaptic neuron. A sliding window, whose duration is set by the window parameter, can be used to smooth the result.

The decoded firing rate is accessible in the post-synaptic neurons with sum(target).

The projection can be connected using any method available in Projection (although all-to-all or many-to-one make the most sense). Delays are ignored.

The weight value scales the decoded firing rate: if you want a pre-synaptic firing rate of 100 Hz to correspond to a post-synaptic rate of 1.0, use w = 1./100..

Example:

```python
pop1 = PoissonPopulation(1000, rates=100.)
pop2 = Population(1, Neuron(equations="r=sum(exc)"))
proj = DecodingProjection(pop1, pop2, 'exc', window=10.0)
proj.connect_all_to_all(1.0, force_multiple_weights=True)
```
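
A minimal continuation of this example, assuming the usual ANNarchy workflow (compile(), simulate() and Monitor are standard ANNarchy API; the recording and the 1000 ms duration are illustrative choices, not part of the original snippet). With w = 1.0 the decoded rate r of pop2 should fluctuate around 100, the mean firing rate of the Poisson population in Hz, while w = 1./100. would bring it to about 1.0, as described above:

```python
# Sketch of a possible continuation: compile, record the decoded rate and simulate.
compile()

m = Monitor(pop2, 'r')   # r = sum(exc) holds the decoded firing rate
simulate(1000.0)         # duration in ms (arbitrary choice for this sketch)

r = m.get('r')           # with w = 1.0, values should hover around 100 (Hz)
```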
Source code in ANNarchy/core/SpecificProjection.py:

````python
class DecodingProjection(SpecificProjection):
    """
    Decoding projection to transform spike trains into firing rates.

    The pre-synaptic population must be a spiking population, while the post-synaptic one must be rate-coded.

    Pre-synaptic spikes are accumulated for each post-synaptic neuron. A sliding window, whose duration is set by the ``window`` parameter, can be used to smooth the result.

    The decoded firing rate is accessible in the post-synaptic neurons with ``sum(target)``.

    The projection can be connected using any method available in ``Projection`` (although all-to-all or many-to-one make the most sense). Delays are ignored.

    The weight value scales the decoded firing rate: if you want a pre-synaptic firing rate of 100 Hz to correspond to a post-synaptic rate of 1.0, use ``w = 1./100.``.

    Example:

    ```python
    pop1 = PoissonPopulation(1000, rates=100.)
    pop2 = Population(1, Neuron(equations="r=sum(exc)"))
    proj = DecodingProjection(pop1, pop2, 'exc', window=10.0)
    proj.connect_all_to_all(1.0, force_multiple_weights=True)
    ```

    """
    def __init__(self, pre, post, target, window=0.0, name=None, copied=False):
        """
        :param pre: pre-synaptic population.
        :param post: post-synaptic population.
        :param target: type of the connection.
        :param window: duration of the time window to collect spikes (default: dt).
        """
        # Instantiate the projection
        SpecificProjection.__init__(self, pre, post, target, None, name, copied)

        # Check populations
        if not self.pre.neuron_type.type == 'spike':
            Global._error('The pre-synaptic population of a DecodingProjection must be spiking.')

        if not self.post.neuron_type.type == 'rate':
            Global._error('The post-synaptic population of a DecodingProjection must be rate-coded.')

        # Process window argument
        if window == 0.0:
            window = Global.config['dt']
        self.window = window

        # Disable openMP post-synaptic matrix split
        self._no_split_matrix = True

        # Not on CUDA
        if Global._check_paradigm('cuda'):
            Global._error('DecodingProjections are not available on CUDA yet.')

    def _copy(self, pre, post):
        "Returns a copy of the population when creating networks. Internal use only."
        copied_proj = DecodingProjection(pre=pre, post=post, target=self.target, window=self.window, name=self.name, copied=True)
        copied_proj._no_split_matrix = True
        return copied_proj

    def _generate_st(self):
        # Generate the code
        self._specific_template['declare_additional'] = """
    // Window
    int window = %(window)s;
    std::deque< std::vector< %(float_prec)s > > rates_history ;
""" % { 'window': int(self.window/Global.config['dt']), 'float_prec': Global.config['precision'] }

        self._specific_template['init_additional'] = """
        rates_history = std::deque< std::vector< %(float_prec)s > >(%(window)s, std::vector< %(float_prec)s >(%(post_size)s, 0.0));
""" % { 'window': int(self.window/Global.config['dt']),'post_size': self.post.size, 'float_prec': Global.config['precision'] }

        self._specific_template['psp_code'] = """
        if (pop%(id_post)s._active) {
            std::vector< std::pair<int, int> > inv_post;
            std::vector< %(float_prec)s > rates = std::vector< %(float_prec)s >(%(post_size)s, 0.0);
            // Iterate over all incoming spikes
            for(int _idx_j = 0; _idx_j < pop%(id_pre)s.spiked.size(); _idx_j++){
                rk_j = pop%(id_pre)s.spiked[_idx_j];
                inv_post = inv_pre_rank[rk_j];
                nb_post = inv_post.size();
                // Iterate over connected post neurons
                for(int _idx_i = 0; _idx_i < nb_post; _idx_i++){
                    // Retrieve the correct indices
                    i = inv_post[_idx_i].first;
                    j = inv_post[_idx_i].second;

                    // Increase the post-synaptic conductance
                    rates[post_rank[i]] +=  %(weight)s;
                }
            }

            rates_history.push_front(rates);
            rates_history.pop_back();
            for(int i=0; i<post_rank.size(); i++){
                sum = 0.0;
                for(int step=0; step<window; step++){
                    sum += rates_history[step][post_rank[i]];
                }
                pop%(id_post)s._sum_%(target)s[post_rank[i]] += sum /float(window) * 1000. / dt / float(pre_rank[i].size());
            }
        } // active
""" % { 'id_proj': self.id, 'id_pre': self.pre.id, 'id_post': self.post.id, 'target': self.target,
        'post_size': self.post.size, 'float_prec': Global.config['precision'],
        'weight': "w" if self._has_single_weight() else "w[i][j]"}

        self._specific_template['psp_prefix'] = """
        int nb_post, i, j, rk_j, rk_post, rk_pre;
        %(float_prec)s sum;
""" % { 'float_prec': Global.config['precision'] }

    def _generate_omp(self):
        # Generate the code
        self._specific_template['declare_additional'] = """
    // Window
    int window = %(window)s;
    std::deque< std::vector< %(float_prec)s > > rates_history ;
""" % { 'window': int(self.window/Global.config['dt']), 'float_prec': Global.config['precision'] }

        self._specific_template['init_additional'] = """
        rates_history = std::deque< std::vector< %(float_prec)s > >(%(window)s, std::vector< %(float_prec)s >(%(post_size)s, 0.0));
""" % { 'window': int(self.window/Global.config['dt']),'post_size': self.post.size, 'float_prec': Global.config['precision'] }

        self._specific_template['psp_code'] = """
        #pragma omp single
        {
            if (pop%(id_post)s._active) {
                std::vector< std::pair<int, int> > inv_post;
                std::vector< %(float_prec)s > rates = std::vector< %(float_prec)s >(%(post_size)s, 0.0);
                // Iterate over all incoming spikes
                for(int _idx_j = 0; _idx_j < pop%(id_pre)s.spiked.size(); _idx_j++){
                    rk_j = pop%(id_pre)s.spiked[_idx_j];
                    inv_post = inv_pre_rank[rk_j];
                    nb_post = inv_post.size();
                    // Iterate over connected post neurons
                    for(int _idx_i = 0; _idx_i < nb_post; _idx_i++){
                        // Retrieve the correct indices
                        i = inv_post[_idx_i].first;
                        j = inv_post[_idx_i].second;

                        // Increase the post-synaptic conductance
                        rates[post_rank[i]] +=  %(weight)s;
                    }
                }

                rates_history.push_front(rates);
                rates_history.pop_back();
                for(int i=0; i<post_rank.size(); i++){
                    sum = 0.0;
                    for(int step=0; step<window; step++){
                        sum += rates_history[step][post_rank[i]];
                    }
                    pop%(id_post)s._sum_%(target)s[post_rank[i]] += sum /float(window) * 1000. / dt / float(pre_rank[i].size());
                }
            } // active
        }
""" % { 'id_proj': self.id, 'id_pre': self.pre.id, 'id_post': self.post.id, 'target': self.target,
        'post_size': self.post.size, 'float_prec': Global.config['precision'],
        'weight': "w" if self._has_single_weight() else "w[i][j]"}

        self._specific_template['psp_prefix'] = """
        int nb_post, i, j, rk_j, rk_post, rk_pre;
        %(float_prec)s sum;
""" % { 'float_prec': Global.config['precision'] }

    def _generate_cuda(self):
        raise Global.ANNarchyException("The DecodingProjection is not available on CUDA devices.", True)
````

__init__(pre, post, target, window=0.0, name=None, copied=False) #

Parameters:

  • pre

    pre-synaptic population.

  • post

    post-synaptic population.

  • target

    type of the connection.

  • window

    duration of the time window to collect spikes (default: dt).

Source code in ANNarchy/core/SpecificProjection.py:

```python
def __init__(self, pre, post, target, window=0.0, name=None, copied=False):
    """
    :param pre: pre-synaptic population.
    :param post: post-synaptic population.
    :param target: type of the connection.
    :param window: duration of the time window to collect spikes (default: dt).
    """
    # Instantiate the projection
    SpecificProjection.__init__(self, pre, post, target, None, name, copied)

    # Check populations
    if not self.pre.neuron_type.type == 'spike':
        Global._error('The pre-synaptic population of a DecodingProjection must be spiking.')

    if not self.post.neuron_type.type == 'rate':
        Global._error('The post-synaptic population of a DecodingProjection must be rate-coded.')

    # Process window argument
    if window == 0.0:
        window = Global.config['dt']
    self.window = window

    # Disable openMP post-synaptic matrix split
    self._no_split_matrix = True

    # Not on CUDA
    if Global._check_paradigm('cuda'):
        Global._error('DecodingProjections are not available on CUDA yet.')
```