Commit cc414553 authored by Johann Schröder

added lif to slayer

parent f5d40b1c
@@ -48,8 +48,8 @@ train_subset = torch.utils.data.random_split(
 test_subset = torch.utils.data.random_split(
     testset, [test_subset_len, len(testset)-test_subset_len])[0]
-BATCH_SIZE = 32
-EPOCHS = 50
+BATCH_SIZE = 128
+EPOCHS = 10
 LR = 0.002
 INPUT_FEATURES = np.product(tonic.datasets.SHD.sensor_size)
 HIDDEN_FEATURES = 100
@@ -147,7 +147,7 @@ def train(model, device, train_loader, optimizer):
     losses = []
     correct = 0
     for data, target in tqdm(train_loader, leave=False, colour='green', total=int(train_len)):
-        data, target = data.float().to(device), torch.LongTensor(target).to(device)
+        data, target = data.to(device), torch.LongTensor(target).to(device)
         optimizer.zero_grad()
         output = model(data)
         pred = output.argmax(dim=1, keepdim=True)
@@ -168,7 +168,7 @@ def test(model, device, test_loader):
     with torch.no_grad():
         for data, target in tqdm(
                 test_loader, colour='red', leave=False, total=int(test_len)):
-            data, target = data.float().to(device), torch.LongTensor(target).to(device)
+            data, target = data.to(device), torch.LongTensor(target).to(device)
             output = model(data)
             test_loss += torch.nn.functional.nll_loss(
                 output, target, reduction="sum").item()  # sum up batch loss
(The following hunks are from a second modified file.)
@@ -17,10 +17,12 @@ plt.style.use('science')
 gc.collect()
 torch.cuda.empty_cache()
+drop_event = tonic.transforms.DropEvent(0.5)
 transform = tonic.transforms.Compose(
     [
         tonic.transforms.Downsample(time_factor=0.001),
-        tonic.transforms.DropEvent(p=0.1),
+        # tonic.transforms.DropEvent(p=0.1),
         tonic.transforms.ToFrame(sensor_size=tonic.datasets.SHD.sensor_size,
                                  time_window=1,
                                  ),
@@ -148,6 +150,7 @@ def train(model, device, train_loader, optimizer):
     correct = 0
     for data, target in tqdm(
             train_loader, leave=False, colour='green', total=int(train_len)):
+        data = drop_event(data)
         data, target = data.float().to(device), torch.LongTensor(target).to(device)
         optimizer.zero_grad()
         output = model(data)
(The following hunks are from a third modified file.)
@@ -32,7 +32,7 @@ transform = tonic.transforms.Compose(
     ]
 )
-BATCH_SIZE = 16
+BATCH_SIZE = 8
 trainset = tonic.datasets.DVSGesture(
     save_to='../data', train=True, transform=transform)
 testset = tonic.datasets.DVSGesture(
@@ -40,7 +40,7 @@ testset = tonic.datasets.DVSGesture(
 EPOCHS = 50
 lr = 0.002
-input_size = np.product(tonic.datasets.DVSGesture.sensor_size)
+input_size = tonic.datasets.DVSGesture.sensor_size
 # input_size = np.product(surface_dimension) * 2
 sensor_size = tonic.datasets.DVSGesture.sensor_size
 hidden_size = 100
(The two new training scripts added by this commit follow in full.)
import tonic
from torch.utils.data import Dataset, DataLoader
import torch
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from slayerPytorch.src.learningStats import learningStats
from prettytable.prettytable import from_csv
import gc
import time
import csv
import slayerSNN as snn
import sys
import os
CURRENT_TEST_DIR = os.getcwd()
gc.collect()
torch.cuda.empty_cache()
plt.style.use('science')
surface_dimension = [45, 45]
transform = tonic.transforms.Compose(
    [
        tonic.transforms.Downsample(time_factor=0.001),
        tonic.transforms.ToFrame(sensor_size=tonic.datasets.DVSGesture.sensor_size,
                                 time_window=10,
                                 ),
        # tonic.transforms.Denoise(filter_time=10),
        # tonic.transforms.ToTimesurface(sensor_size=tonic.datasets.DVSGesture.sensor_size,
        #                                tau=5000,
        #                                surface_dimensions=surface_dimension,
        #                                decay='exp'
        #                                )
    ]
)
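# Units note (an assumption about tonic's DVSGesture, whose raw timestamps
# are in microseconds): Downsample(time_factor=0.001) rescales them to
# milliseconds, so ToFrame(time_window=10) bins the events into 10 ms frames.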
BATCH_SIZE = 8
trainset = tonic.datasets.DVSGesture(
save_to='../data', train=True, transform=transform)
testset = tonic.datasets.DVSGesture(
save_to='../data', train=False, transform=transform)
EPOCHS = 50
lr = 0.002
input_size = tonic.datasets.DVSGesture.sensor_size
# input_size = [surface_dimension, 2]
sensor_size = tonic.datasets.DVSGesture.sensor_size
hidden_size = 100
n_classes = len(trainset.classes)
layers = (hidden_size,)  # trailing comma so the CSV logs a tuple, like the two-layer script
netParams = snn.params('network_dvsgesture_lif.yaml')
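# The yaml file itself is not part of this commit page; a minimal sketch of
# what it is assumed to contain, restricted to the keys this script reads
# below (all values here are illustrative placeholders, not the author's):
#
#   simulation:
#       Ts: 1
#       tSample: 1450
#   neuron:
#       type:     LOIHI
#       vThMant:  80
#       vDecay:   128
#       iDecay:   1024
#       refDelay: 1
#       wgtExp:   0
#       tauRho:   1
#       scaleRho: 1
#   training:
#       error:
#           type: NumSpikes
#           tgtSpikeRegion: {start: 0, stop: 1450}
#           tgtSpikeCount:  {true: 60, false: 10}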
# Network definition
class Network(torch.nn.Module):
    def __init__(self, netParams):
        super(Network, self).__init__()
        # Initialize slayer with the Loihi neuron model from the yaml.
        slayer = snn.loihi(netParams['neuron'], netParams['simulation'])
        self.slayer = slayer
        # Define network functions
        self.fc1 = slayer.dense(input_size, hidden_size)
        self.fc2 = slayer.dense(hidden_size, n_classes)

    def forward(self, spikeInput):
        # Both sets of definitions are equivalent; the uncommented version is
        # much faster. psp filters the weighted input spikes into post-synaptic
        # potentials, and spikeLoihi applies the Loihi neuron dynamics.
        # spikeLayer1 = self.slayer.spike(self.fc1(spikeInput))
        # spikeLayer2 = self.slayer.spike(self.fc2(spikeLayer1))
        spikeLayer1 = self.slayer.spikeLoihi(self.slayer.psp(self.fc1(spikeInput)))
        spikeLayer2 = self.slayer.spikeLoihi(self.slayer.psp(self.fc2(spikeLayer1)))
        return spikeLayer2
        # return spikeInput, spikeLayer1, spikeLayer2
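
# Shape sketch (an illustration assuming SLAYER's NCHWT layout, not code from
# the commit): with DVSGesture's 128x128x2 sensor and T simulation steps,
#
#   spikeInput : (N, 2, 128, 128, T)
#   after fc1  : (N, hidden_size, 1, 1, T)
#   after fc2  : (N, n_classes, 1, 1, T)
#
# snn.predict.getClass then picks the class with the most output spikes.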
if __name__ == '__main__':
    # Define the cuda device to run the code on.
    device = torch.device('cuda')
    # Create network instance.
    net = Network(netParams).to(device)
    # Create snn loss instance.
    error = snn.loss(netParams, snn.loihi).to(device)
    # Define optimizer module.
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    # train samples: 1077, test samples: 264
    train_subset_len = 1077
    test_subset_len = 264
    train_subset = torch.utils.data.random_split(
        trainset, [train_subset_len, len(trainset)-train_subset_len])[0]
    test_subset = torch.utils.data.random_split(
        testset, [test_subset_len, len(testset)-test_subset_len])[0]
    # Dataset and dataLoader instances.
    train_loader = torch.utils.data.DataLoader(dataset=train_subset,
                                               batch_size=BATCH_SIZE,
                                               collate_fn=tonic.collation.PadTensors(),
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_subset,
                                              batch_size=BATCH_SIZE,
                                              collate_fn=tonic.collation.PadTensors(),
                                              shuffle=True)
    # Learning stats instance.
    stats = learningStats()
    # Main loop
    start_time = time.time()  # before the loop, so 'runtime' below spans all epochs
    for epoch in range(EPOCHS):
        tSt = datetime.now()
        for i, (events, label) in enumerate(iter(train_loader), 0):
            target = torch.zeros((len(label), len(trainset.classes), 1, 1, 1))
            input = events.permute([1, 2, 3, 4, 0])
            for idx, l in enumerate(label):
                target[idx, l.item(), ...] = 1
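            # Layout note (an assumption about this tonic version): PadTensors
            # collates samples time-major as (T, N, C, H, W), so the permute
            # above reorders batches into SLAYER's NCHWT layout (N, C, H, W, T).
            # The (N, n_classes, 1, 1, 1) tensor is the one-hot target shape
            # that snn.loss's numSpikes expects.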
            # Move the input and target to correct GPU.
            input = input.float().to(device)
            target = target.to(device)
            # Forward pass of the network.
            output = net.forward(input)
            # Gather the training stats.
            stats.training.correctSamples += torch.sum(
                snn.predict.getClass(output) == label).data.item()
            stats.training.numSamples += len(label)
            # Calculate loss.
            loss = error.numSpikes(output, target)
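            # How numSpikes scores this (per slayerSNN's spike-count loss):
            # output spike counts inside tgtSpikeRegion are driven towards
            # tgtSpikeCount[True] for the labelled class and
            # tgtSpikeCount[False] for all others (both set in the yaml).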
            # Reset gradients to zero.
            optimizer.zero_grad()
            # Backward pass of the network.
            loss.backward()
            # Update weights.
            optimizer.step()
            # Gather training loss stats.
            stats.training.lossSum += loss.cpu().data.item()
            # Display training stats.
            stats.print(epoch, i, (datetime.now() - tSt).total_seconds())
        # Testing loop.
        # Same steps as the training loop, minus backpropagation and the
        # weight update.
        for i, (events, label) in enumerate(iter(test_loader), 0):
            target = torch.zeros((len(label), len(testset.classes), 1, 1, 1))
            input = events.permute([1, 2, 3, 4, 0])
            for idx, l in enumerate(label):
                target[idx, l.item(), ...] = 1
            input = input.float().to(device)
            target = target.to(device)
            # print(input.shape, target.shape, label.shape)
            output = net.forward(input)
            stats.testing.correctSamples += torch.sum(
                snn.predict.getClass(output) == label).data.item()
            stats.testing.numSamples += len(label)
            loss = error.numSpikes(output, target)
            stats.testing.lossSum += loss.cpu().data.item()
            stats.print(epoch, i)
        # Update stats.
        stats.update()
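        # stats.update() is what rolls this epoch's loss/accuracy sums into
        # lossLog and accuracyLog, which the plotting code below reads.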
    runtime = time.time() - start_time
    date = datetime.now()
    filetype = '.png'
    d = str(date.day) + "_" + str(date.month) + "_" + \
        str(date.year) + "_" + str(date.time())
    d = d.split('.')[0]
    scriptname = sys.argv[0].split('.')[0]
    # Record the transform pipeline one transform per line for the CSV,
    # stripping the Compose() wrapper lines and the verbose decay argument.
    tf_str = str(transform).replace(" ", "").splitlines(keepends=False)[1:-1]
    try:
        tf_str[1] = tf_str[1].replace(",decay='exp'", "")
    except IndexError:
        pass
    # Plot the results.
    # savefig does not create directories, so make sure the plot folder exists.
    os.makedirs("./dvsgesture/plots/" + scriptname, exist_ok=True)
    # Learning loss
    fig = plt.figure(facecolor="w", figsize=(10, 5))
    plt.semilogy(stats.training.lossLog, label='Training')
    plt.semilogy(stats.testing.lossLog, label='Testing')
    plt.title('Loss Curves')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig("./dvsgesture/plots/" + scriptname +
                "/loss" + "_" + d + filetype)
    np.save("./dvsgesture/plots/" + scriptname + "/loss_data" +
            "_" + d + '.npy', [stats.training.lossLog, stats.testing.lossLog])
    # Learning accuracy
    fig = plt.figure(facecolor="w", figsize=(10, 5))
    plt.plot(stats.training.accuracyLog, label='Training')
    plt.plot(stats.testing.accuracyLog, label='Testing')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.savefig("./dvsgesture/plots/" + scriptname +
                "/acc" + "_" + d + filetype)
    np.save("./dvsgesture/plots/" + scriptname + "/acc_data" +
            "_" + d + '.npy', [stats.training.accuracyLog, stats.testing.accuracyLog])
    filename_raw = 'slayer_dvsgesture_lif_results_raw.csv'
    filename_pretty = 'slayer_dvsgesture_lif_results_pretty.csv'
    fields = ['n',
              ' date',
              ' filename',
              ' device',
              ' runtime',
              ' accuracy (train, test)',
              ' loss (train, test)',
              ' epochs',
              ' batch_size',
              ' lr',
              ' layers',
              ' tSample',
              ' type',
              ' vThMant',
              ' vDecay',
              ' iDecay',
              ' refDelay',
              ' wgtExp',
              ' tauRho',
              ' scaleRho',
              ' tgtSpikeRegion',
              ' tgtSpikeCount',
              ' transform']
    # Count the data rows already in the results file (header excluded);
    # a missing file simply means this is the first run.
    num_rows = 0
    if os.path.exists(filename_raw):
        with open(filename_raw) as f:
            num_rows = sum(1 for _ in f)
    if num_rows != 0:
        num_rows -= 1
    rows = [[num_rows,
             d.replace('_', ' '),
             sys.argv[0].split('.')[0] + '.py',
             device,
             runtime,
             (stats.training.accuracyLog[-1], stats.testing.accuracyLog[-1]),
             (stats.training.lossLog[-1], stats.testing.lossLog[-1]),
             EPOCHS,
             BATCH_SIZE,
             lr,
             layers,
             netParams['simulation']['tSample'],
             netParams['neuron']['type'],
             netParams['neuron']['vThMant'],
             netParams['neuron']['vDecay'],
             netParams['neuron']['iDecay'],
             netParams['neuron']['refDelay'],
             netParams['neuron']['wgtExp'],
             netParams['neuron']['tauRho'],
             netParams['neuron']['scaleRho'],
             netParams['training']['error']['tgtSpikeRegion'],
             netParams['training']['error']['tgtSpikeCount'],
             tf_str]]
    with open(filename_raw, 'a') as csvfile:
        csvwriter = csv.writer(csvfile)
        if num_rows == 0:
            csvwriter.writerow(fields)
        csvwriter.writerows(rows)
    with open(filename_raw) as fp:
        table = from_csv(fp)
    with open(filename_pretty, "w") as f:
        f.write(table.get_string())

# ===========================================================================
# Second new script: the same DVSGesture pipeline with two hidden layers.
# ===========================================================================
import tonic
from torch.utils.data import Dataset, DataLoader
import torch
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from slayerPytorch.src.learningStats import learningStats
from prettytable.prettytable import from_csv
import gc
import time
import csv
import slayerSNN as snn
import sys
import os
CURRENT_TEST_DIR = os.getcwd()
gc.collect()
torch.cuda.empty_cache()
plt.style.use('science')
surface_dimension = [45, 45]
transform = tonic.transforms.Compose(
    [
        tonic.transforms.Downsample(time_factor=0.001),
        tonic.transforms.ToFrame(sensor_size=tonic.datasets.DVSGesture.sensor_size,
                                 time_window=10,
                                 ),
        # tonic.transforms.Denoise(filter_time=10),
        # tonic.transforms.ToTimesurface(sensor_size=tonic.datasets.DVSGesture.sensor_size,
        #                                tau=5000,
        #                                surface_dimensions=surface_dimension,
        #                                decay='exp'
        #                                )
    ]
)
BATCH_SIZE = 8
trainset = tonic.datasets.DVSGesture(
save_to='../data', train=True, transform=transform)
testset = tonic.datasets.DVSGesture(
save_to='../data', train=False, transform=transform)
EPOCHS = 50
lr = 0.002
input_size = tonic.datasets.DVSGesture.sensor_size
# input_size = [surface_dimension, 2]
sensor_size = tonic.datasets.DVSGesture.sensor_size
hidden_size_l1 = 500
hidden_size_l2 = 100
n_classes = len(trainset.classes)
layers = (hidden_size_l1, hidden_size_l2)
netParams = snn.params('network_dvsgesture_lif.yaml')
# Network definition
class Network(torch.nn.Module):
    def __init__(self, netParams):
        super(Network, self).__init__()
        # Initialize slayer with the Loihi neuron model from the yaml.
        slayer = snn.loihi(netParams['neuron'], netParams['simulation'])
        self.slayer = slayer
        # Define network functions
        self.fc1 = slayer.dense(input_size, hidden_size_l1)
        self.fc2 = slayer.dense(hidden_size_l1, hidden_size_l2)
        self.fc3 = slayer.dense(hidden_size_l2, n_classes)

    def forward(self, spikeInput):
        # psp filters the weighted input spikes into post-synaptic potentials;
        # spikeLoihi applies the Loihi neuron dynamics to produce output spikes.
        spikeLayer1 = self.slayer.spikeLoihi(self.slayer.psp(self.fc1(spikeInput)))
        spikeLayer2 = self.slayer.spikeLoihi(self.slayer.psp(self.fc2(spikeLayer1)))
        spikeLayer3 = self.slayer.spikeLoihi(self.slayer.psp(self.fc3(spikeLayer2)))
        return spikeLayer3
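
# Compared with the one-hidden-layer script above, this variant stacks a
# 500-unit layer before the 100-unit layer; shapes follow the same NCHWT
# sketch as above, with hidden_size_l1 and hidden_size_l2 in place of
# hidden_size.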
if __name__ == '__main__':
    # Define the cuda device to run the code on.
    device = torch.device('cuda')
    # Create network instance.
    net = Network(netParams).to(device)
    # Create snn loss instance.
    error = snn.loss(netParams, snn.loihi).to(device)
    # Define optimizer module.
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)  # use the lr variable the CSV logs
    # train samples: 1077, test samples: 264
    train_subset_len = 1077
    test_subset_len = 264
    train_subset = torch.utils.data.random_split(
        trainset, [train_subset_len, len(trainset)-train_subset_len])[0]
    test_subset = torch.utils.data.random_split(
        testset, [test_subset_len, len(testset)-test_subset_len])[0]
    # Dataset and dataLoader instances.
    train_loader = torch.utils.data.DataLoader(dataset=train_subset,
                                               batch_size=BATCH_SIZE,
                                               collate_fn=tonic.collation.PadTensors(),
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_subset,
                                              batch_size=BATCH_SIZE,
                                              collate_fn=tonic.collation.PadTensors(),
                                              shuffle=True)
    # Learning stats instance.
    stats = learningStats()
    # Main loop
    start_time = time.time()  # before the loop, so 'runtime' below spans all epochs
    for epoch in range(EPOCHS):
        tSt = datetime.now()
        for i, (events, label) in enumerate(iter(train_loader), 0):
            target = torch.zeros((len(label), len(trainset.classes), 1, 1, 1))
            input = events.permute([1, 2, 3, 4, 0])
            for idx, l in enumerate(label):
                target[idx, l.item(), ...] = 1
            # Move the input and target to correct GPU.
            input = input.float().to(device)
            target = target.to(device)
            # Forward pass of the network.
            output = net.forward(input)
            # Gather the training stats.
            stats.training.correctSamples += torch.sum(
                snn.predict.getClass(output) == label).data.item()
            stats.training.numSamples += len(label)
            # Calculate loss.
            loss = error.numSpikes(output, target)
            # Reset gradients to zero.
            optimizer.zero_grad()
            # Backward pass of the network.
            loss.backward()
            # Update weights.
            optimizer.step()
            # Gather training loss stats.
            stats.training.lossSum += loss.cpu().data.item()
            # Display training stats.
            stats.print(epoch, i, (datetime.now() - tSt).total_seconds())
        # Testing loop.
        # Same steps as the training loop, minus backpropagation and the
        # weight update.
        for i, (events, label) in enumerate(iter(test_loader), 0):
            # Re-create the one-hot target for this batch (without this line,
            # the target from the last training batch would be reused).
            target = torch.zeros((len(label), len(testset.classes), 1, 1, 1))
            input = events.permute([1, 2, 3, 4, 0])
            for idx, l in enumerate(label):
                target[idx, l.item(), ...] = 1
            input = input.float().to(device)
            target = target.to(device)
            output = net.forward(input)
            stats.testing.correctSamples += torch.sum(
                snn.predict.getClass(output) == label).data.item()
            stats.testing.numSamples += len(label)
            loss = error.numSpikes(output, target)
            stats.testing.lossSum += loss.cpu().data.item()
            stats.print(epoch, i)
        # Update stats.
        stats.update()
    runtime = time.time() - start_time
    date = datetime.now()
    filetype = '.png'
    d = str(date.day) + "_" + str(date.month) + "_" + \
        str(date.year) + "_" + str(date.time())
    d = d.split('.')[0]
    scriptname = sys.argv[0].split('.')[0]
    # Record the transform pipeline one transform per line for the CSV,
    # stripping the Compose() wrapper lines and the verbose decay argument.
    tf_str = str(transform).replace(" ", "").splitlines(keepends=False)[1:-1]
    try:
        tf_str[1] = tf_str[1].replace(",decay='exp'", "")
    except IndexError:
        pass
    # Plot the results.
    # savefig does not create directories, so make sure the plot folder exists.
    os.makedirs("./dvsgesture/plots/" + scriptname, exist_ok=True)
    # Learning loss
    fig = plt.figure(facecolor="w", figsize=(10, 5))
    plt.semilogy(stats.training.lossLog, label='Training')
    plt.semilogy(stats.testing.lossLog, label='Testing')
    plt.title('Loss Curves')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig("./dvsgesture/plots/" + scriptname +
                "/loss" + "_" + d + filetype)
    np.save("./dvsgesture/plots/" + scriptname + "/loss_data" +
            "_" + d + '.npy', [stats.training.lossLog, stats.testing.lossLog])
    # Learning accuracy
    fig = plt.figure(facecolor="w", figsize=(10, 5))
    plt.plot(stats.training.accuracyLog, label='Training')
    plt.plot(stats.testing.accuracyLog, label='Testing')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.savefig("./dvsgesture/plots/" + scriptname +
                "/acc" + "_" + d + filetype)
    np.save("./dvsgesture/plots/" + scriptname + "/acc_data" +
            "_" + d + '.npy', [stats.training.accuracyLog, stats.testing.accuracyLog])
    filename_raw = 'slayer_dvsgesture_lif_results_raw.csv'
    filename_pretty = 'slayer_dvsgesture_lif_results_pretty.csv'
    fields = ['n',
              ' date',
              ' filename',
              ' device',
              ' runtime',
              ' accuracy (train, test)',
              ' loss (train, test)',
              ' epochs',
              ' batch_size',
              ' lr',
              ' layers',
              ' tSample',
              ' type',
              ' vThMant',
              ' vDecay',
              ' iDecay',