 http://www.izhikevich.org/publications/spikes.pdf
 """

+REGULAR_SPIKING_PARAMS        = {'a': 0.02, 'b': 0.20, 'c': -65.0, 'd': 8.00}
+INTRINSICALLY_BURSTING_PARAMS = {'a': 0.02, 'b': 0.20, 'c': -55.0, 'd': 4.00}
+CHATTERING_PARAMS             = {'a': 0.02, 'b': 0.20, 'c': -50.0, 'd': 2.00}
+FAST_SPIKING_PARAMS           = {'a': 0.10, 'b': 0.20, 'c': -65.0, 'd': 2.00}
+THALAMO_CORTICAL_PARAMS       = {'a': 0.02, 'b': 0.25, 'c': -65.0, 'd': 0.05}
+RESONATOR_PARAMS              = {'a': 0.10, 'b': 0.25, 'c': -65.0, 'd': 2.00}
+LOW_THRESHOLD_SPIKING_PARAMS  = {'a': 0.02, 'b': 0.25, 'c': -65.0, 'd': 2.00}
+

 class Neuron(object):
-    def __init__(self, bias, a, b, c, d, time_step_msec=1.0):
+    def __init__(self, bias, a, b, c, d):
         """
         a, b, c, d are the parameters of this model.
         a: the time scale of the recovery variable.
@@ -20,20 +28,19 @@ def __init__(self, bias, a, b, c, d, time_step_msec=1.0):
         d: after-spike reset of the recovery variable.

         The following parameters produce some known spiking behaviors:
-            Regular spiking: a = 0.02, b = 0.2, c = -65.0, d = 8.0
-            Intrinsically bursting: a = 0.02, b = 0.2, c = -55.0, d = 4.0
-            Chattering: a = 0.02, b = 0.2, c = -50.0, d = 2.0
-            Fast spiking: a = 0.1, b = 0.2, c = -65.0, d = 2.0
-            Thalamo-cortical: a = 0.02, b = 0.25, c = -65.0, d = 0.05
-            Resonator: a = 0.1, b = 0.25, c = -65.0, d = 2.0
-            Low-threshold spiking: a = 0.02, b = 0.25, c = -65, d = 2.0
+            Regular spiking: a = 0.02, b = 0.2, c = -65.0, d = 8.0
+            Intrinsically bursting: a = 0.02, b = 0.2, c = -55.0, d = 4.0
+            Chattering: a = 0.02, b = 0.2, c = -50.0, d = 2.0
+            Fast spiking: a = 0.1, b = 0.2, c = -65.0, d = 2.0
+            Thalamo-cortical: a = 0.02, b = 0.25, c = -65.0, d = 0.05
+            Resonator: a = 0.1, b = 0.25, c = -65.0, d = 2.0
+            Low-threshold spiking: a = 0.02, b = 0.25, c = -65.0, d = 2.0
         """
         self.a = a
         self.b = b
         self.c = c
         self.d = d
         self.bias = bias
-        self.dt_msec = time_step_msec

         # Membrane potential (millivolts).
         self.v = self.c
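A note on the presets added above: since the constructor now takes the model parameters explicitly, the module-level dictionaries can be unpacked straight into it. A minimal sketch, assuming the definitions above are in scope (the bias value is arbitrary):

```python
# Build a regular-spiking neuron from the module-level preset; the bias here is arbitrary.
n = Neuron(bias=0.0, **REGULAR_SPIKING_PARAMS)
```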
@@ -44,9 +51,9 @@ def __init__(self, bias, a, b, c, d, time_step_msec=1.0):
         self.output = 0.0
         self.current = self.bias

-    def advance(self):
+    def advance(self, dt_msec):
         """
-        Advances simulation time by 1 ms.
+        Advances simulation time by the given time step in milliseconds.

         v' = 0.04 * v^2 + 5v + 140 - u + I
         u' = a * (b * v - u)
@@ -59,9 +66,9 @@ def advance(self):
         # TODO: The need to catch overflows indicates that the current method is
         # not stable for all possible network configurations and states.
         try:
-            self.v += 0.5 * self.dt_msec * (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + self.current)
-            self.v += 0.5 * self.dt_msec * (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + self.current)
-            self.u += self.dt_msec * self.a * (self.b * self.v - self.u)
+            self.v += 0.5 * dt_msec * (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + self.current)
+            self.v += 0.5 * dt_msec * (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + self.current)
+            self.u += dt_msec * self.a * (self.b * self.v - self.u)
         except OverflowError:
             # Reset without producing a spike.
             self.v = self.c
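The two half-updates of `v` mirror the reference implementation in the linked paper, which integrates the membrane equation in two half-steps for numerical stability while `u` is updated once per step. With the time step now passed into `advance`, a caller can choose a finer resolution. A small sketch of driving a single neuron, assuming the class above is in scope (the injected current and step size are arbitrary choices):

```python
# Drive one neuron with a constant current and record (assumed) spike times.
n = Neuron(bias=0.0, **THALAMO_CORTICAL_PARAMS)
n.current = 10.0                 # injected current; value chosen arbitrarily
spike_times_msec = []
for step in range(4000):         # 4000 * 0.25 ms = 1 second of simulated time
    n.advance(0.25)
    if n.output > 0:             # assumes output is nonzero only when the neuron fires
        spike_times_msec.append(step * 0.25)
```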
@@ -97,18 +104,19 @@ def __init__(self, neurons, inputs, outputs, connections):
         self.currents = [0.0] * (1 + max_node)

     def set_inputs(self, inputs):
+        """Assign input voltages and reset currents to zero."""
         assert len(inputs) == len(self.inputs)
         for i, v in zip(self.inputs, inputs):
             self.currents[i] = 0.0
             self.neurons[i].current = 0.0
             self.neurons[i].output = v

     def reset(self):
-        # Reset all neurons.
+        """ Reset all neurons to their default state."""
         for i, n in self.neurons.items():
             n.reset()

-    def advance(self):
+    def advance(self, dt_msec):
         # Initialize all non-input neuron currents to the bias value.
         for i, n in self.neurons.items():
             if i not in self.inputs:
@@ -121,12 +129,12 @@ def advance(self):
         for i, n in self.neurons.items():
             if i not in self.inputs:
                 n.current = self.currents[i]
-                n.advance()
+                n.advance(dt_msec)

         return [self.neurons[i].output for i in self.outputs]


-def create_phenotype(genome, a, b, c, d, time_step_msec=1.0):
+def create_phenotype(genome, a, b, c, d):
     """ Receives a genome and returns its phenotype (a neural network) """

     neurons = {}
@@ -135,7 +143,7 @@ def create_phenotype(genome, a, b, c, d, time_step_msec=1.0):
     for ng in genome.node_genes.values():
         # TODO: It seems like we should have a separate node gene implementation
         # that optionally encodes more (all?) of the Izhikevich model parameters.
-        neurons[ng.ID] = Neuron(ng.bias, a, b, c, d, time_step_msec)
+        neurons[ng.ID] = Neuron(ng.bias, a, b, c, d)
         if ng.type == 'INPUT':
             inputs.append(ng.ID)
         elif ng.type == 'OUTPUT':
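Taken together, the time step now flows from the caller through SpikingNetwork.advance down to each Neuron, so the same phenotype can be simulated at different resolutions. A hypothetical end-to-end sketch (the `genome` object and the input values are placeholders; the preset dict is unpacked into the new create_phenotype signature):

```python
# End-to-end sketch: `genome` is a placeholder for a NEAT genome with node/connection genes.
net = create_phenotype(genome, **FAST_SPIKING_PARAMS)
net.reset()
net.set_inputs([1.0, 0.0])       # one value per input node of this particular genome
for _ in range(1000):
    outputs = net.advance(0.25)  # advance the whole network in 0.25 ms steps
```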