@@ -1212,8 +1212,8 @@ def __init__(
         self.r = torch.rand(n)
         self.a = 0.02 * torch.ones(n)
         self.b = 0.2 * torch.ones(n)
-        self.c = -65.0 + 15 * (self.r ** 2)
-        self.d = 8 - 6 * (self.r ** 2)
+        self.c = -65.0 + 15 * (self.r ** 2)
+        self.d = 8 - 6 * (self.r ** 2)
         self.S = 0.5 * torch.rand(n, n)
         self.excitatory = torch.ones(n).byte()

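Aside (not part of the diff): the quadratic dependence on r follows the heterogeneous excitatory population of Izhikevich (2003), where r = 0 gives regular-spiking parameters (c = -65, d = 8) and r = 1 gives chattering parameters (c = -50, d = 2). A minimal sketch of the two extremes:

import torch

r = torch.tensor([0.0, 1.0])       # least and most heterogeneous cells
c = -65.0 + 15 * (r ** 2)          # reset voltage: -65 mV (regular spiking) to -50 mV (chattering)
d = 8 - 6 * (r ** 2)               # recovery increment: 8 down to 2
print(c.tolist(), d.tolist())      # [-65.0, -50.0] [8.0, 2.0]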
@@ -1282,8 +1282,8 @@ def forward(self, x: torch.Tensor) -> None:
         )

         # Apply v and u updates.
-        self.v += self.dt * 0.5 * (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + x)
-        self.v += self.dt * 0.5 * (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + x)
+        self.v += self.dt * 0.5 * (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + x)
+        self.v += self.dt * 0.5 * (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + x)
         self.u += self.dt * self.a * (self.b * self.v - self.u)

         # Voltage clipping to lower bound.
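Review note: the voltage update is intentionally applied twice with a step of 0.5 * dt, matching the two half-steps Izhikevich (2003) uses for numerical stability of the quadratic term; it is not an accidental duplicate. A minimal single-neuron sketch of the same update loop (dt, the input current, and the spike/reset handling are illustrative assumptions, not code from this PR):

import torch

dt, a, b, c, d = 1.0, 0.02, 0.2, -65.0, 8.0   # regular-spiking parameters
v, u = torch.tensor(c), torch.tensor(b * c)   # membrane potential and recovery variable
x = torch.tensor(10.0)                        # constant injected current (illustrative)

spikes = []
for step in range(1000):
    # Two half-steps of the voltage update, as in the diff above.
    v = v + dt * 0.5 * (0.04 * v ** 2 + 5 * v + 140 - u + x)
    v = v + dt * 0.5 * (0.04 * v ** 2 + 5 * v + 140 - u + x)
    u = u + dt * a * (b * v - u)
    if v >= 30.0:                             # spike threshold, then reset
        spikes.append(step)
        v, u = torch.tensor(c), u + d
print(len(spikes), "spikes in 1 s")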
@@ -1518,7 +1518,7 @@ def set_batch_size(self, batch_size) -> None:

     def AlphaKernel(self, dt):
         t = torch.arange(0, self.res_window_size, dt)
-        kernelVec = (1 / (self.tau ** 2)) * t * torch.exp(-t / self.tau)
+        kernelVec = (1 / (self.tau ** 2)) * t * torch.exp(-t / self.tau)
         return torch.flip(kernelVec, [0])

     def AlphaKernelSLAYER(self, dt):
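Review note: AlphaKernel samples the alpha function (1 / tau ** 2) * t * exp(-t / tau) over the response window and reverses it in time. Since PyTorch's conv1d computes a cross-correlation, a flipped kernel makes it behave like a true convolution over the spike history; the sketch below assumes that is how the kernel is consumed (the calling code is outside this hunk, and tau, dt, and the window size are made-up values):

import torch
import torch.nn.functional as F

tau, dt, window = 5.0, 1.0, 20.0                     # illustrative values
t = torch.arange(0, window, dt)
kernel = (1 / tau ** 2) * t * torch.exp(-t / tau)    # alpha function
kernel = torch.flip(kernel, [0])                     # reverse in time, as AlphaKernel does

spikes = torch.zeros(1, 1, 100)
spikes[0, 0, ::17] = 1.0                             # a sparse spike train
# Cross-correlate the spike history with the flipped kernel (i.e. convolve with the kernel).
response = F.conv1d(spikes, kernel.view(1, 1, -1), padding=kernel.numel() - 1)
print(response.shape)                                # torch.Size([1, 1, 119])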