Commit 23155049 authored by Johann Schröder's avatar Johann Schröder
Browse files

some changes to bindsnet files

parent 586faee9
This diff is collapsed.
+---+---------------------+--------------------------+--------+--------------------+-----------------------------+-----------------------+------------------+--------+------------+----------------+--------+------+-----+------+------+--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| n | date | filename | device | runtime | accuracy (train, test) | samples (train, test) | network | epochs | batch_size | nu | layers | exc | inh | wmin | wmax | norm | transform |
+---+---------------------+--------------------------+--------+--------------------+-----------------------------+-----------------------+------------------+--------+------------+----------------+--------+------+-----+------+------+--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| 0 | 15 12 2021 15:54:54 | dvsgesture_supervised.py | cuda | 317.26551485061646 | (0.9117920148560817, 0.125) | (1077, 264) | DiehlAndCook2015 | 1 | 1 | [0.0001, 0.01] | 100 | 22.5 | 120 | 0.0 | 1.0 | 6553.6 | ['Downsample(time_factor=0.001,spatial_factor=1)', 'ToFrame(sensor_size=(128,128,2),time_window=100,event_count=None,n_time_bins=None,n_event_bins=None,overlap=0,include_incomplete=False)'] |
| 1 | 15 12 2021 16:35:17 | dvsgesture_supervised.py | cuda | 1644.0364136695862 | (0.1328125, 0.265625) | (128, 64) | DiehlAndCook2015 | 1 | 1 | [0.0001, 0.01] | 1100 | 22.5 | 120 | 0.0 | 1.0 | 6553.6 | ['Downsample(time_factor=0.001,spatial_factor=1)', 'ToFrame(sensor_size=(128,128,2),time_window=10,event_count=None,n_time_bins=None,n_event_bins=None,overlap=0,include_incomplete=False)'] |
+---+---------------------+--------------------------+--------+--------------------+-----------------------------+-----------------------+------------------+--------+------------+----------------+--------+------+-----+------+------+--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
\ No newline at end of file
n, date, filename, device, runtime," accuracy (train, test)"," samples (train, test)", network, epochs, batch_size, nu, layers, exc, inh, wmin, wmax, norm, transform
0,15 12 2021 15:54:54,dvsgesture_supervised.py,cuda,317.26551485061646,"(0.9117920148560817, 0.125)","(1077, 264)",DiehlAndCook2015,1,1,"[0.0001, 0.01]",100,22.5,120,0.0,1.0,6553.6,"['Downsample(time_factor=0.001,spatial_factor=1)', 'ToFrame(sensor_size=(128,128,2),time_window=100,event_count=None,n_time_bins=None,n_event_bins=None,overlap=0,include_incomplete=False)']"
1,15 12 2021 16:35:17,dvsgesture_supervised.py,cuda,1644.0364136695862,"(0.1328125, 0.265625)","(128, 64)",DiehlAndCook2015,1,1,"[0.0001, 0.01]",1100,22.5,120,0.0,1.0,6553.6,"['Downsample(time_factor=0.001,spatial_factor=1)', 'ToFrame(sensor_size=(128,128,2),time_window=10,event_count=None,n_time_bins=None,n_event_bins=None,overlap=0,include_incomplete=False)']"
import csv
import datetime
import gc
from bindsnet.analysis.plotting import (plot_assignments, plot_performance,
plot_spikes,
plot_voltages,
plot_weights)
from custom_models import (DiehlAndCook2015,
TwoLayerNetwork,
TwoLayerNetworkIzhikevich)
from bindsnet.utils import (get_square_assignments,
get_square_weights)
from re import S
import sys
from bindsnet.analysis.plotting import (plot_assignments,
plot_performance,
plot_spikes,
plot_voltages,
plot_weights,
plot_input)
from bindsnet.evaluation import (all_activity,
assign_labels,
proportion_weighting)
from bindsnet.utils import get_square_assignments, get_square_weights
from custom_models import (DiehlAndCook2015, DiehlAndCook2015v2,
TwoLayerNetwork,
TwoLayerNetworkIzhikevich,
TwoLayerNetworkSimple)
from numpy.lib.utils import source
import tonic
import torch
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from bindsnet.evaluation import (all_activity,
assign_labels,
proportion_weighting)
from bindsnet.network.monitors import Monitor
import torchvision as tv
import torchvision.transforms.functional as F
import time
from prettytable.prettytable import from_csv
plt.style.use('science')
gc.collect()
torch.cuda.empty_cache()

# Event-stream preprocessing: slow event timestamps down by 1000x, then bin
# each sample's events into fixed 10-unit time windows so the network sees a
# dense frame sequence.
# NOTE(review): the committed diff contained two merged pipeline variants
# (n_time_bins=3 vs time_window=10); the time_window=10 variant matches the
# most recent results row in the CSV — confirm against the repo history.
transform = tonic.transforms.Compose(
    [
        # tonic.transforms.Denoise(filter_time=1),
        tonic.transforms.Downsample(time_factor=0.001),
        tonic.transforms.ToFrame(
            sensor_size=tonic.datasets.DVSGesture.sensor_size,
            time_window=10,
            # n_time_bins=n_time_bins,
            # include_incomplete=True
        ),
    ]
)

epochs = 1
BATCH_SIZE = 32

trainset = tonic.datasets.DVSGesture(save_to='../data',
                                     transform=transform,
                                     train=True)
testset = tonic.datasets.DVSGesture(save_to='../data',
                                    transform=transform,
                                    train=False)

# Full DVSGesture split: 1077 train samples, 264 test samples.
train_subset_len = 1077
test_subset_len = 264
train_subset = torch.utils.data.random_split(
    trainset, [train_subset_len, len(trainset) - train_subset_len])[0]
test_subset = torch.utils.data.random_split(
    testset, [test_subset_len, len(testset) - test_subset_len])[0]

# Ceiling division so the final, possibly incomplete batch is counted too
# (used only as the tqdm total).
train_len = -(-train_subset_len // BATCH_SIZE)
test_len = -(-test_subset_len // BATCH_SIZE)

# Dataloaders: PadTensors pads variable-length event-frame sequences so each
# batch collates into one rectangular tensor.
train_loader = torch.utils.data.DataLoader(
    dataset=train_subset,
    batch_size=BATCH_SIZE,
    collate_fn=tonic.collation.PadTensors(),
    shuffle=True,
)
test_loader = torch.utils.data.DataLoader(
    dataset=test_subset,
    batch_size=BATCH_SIZE,
    collate_fn=tonic.collation.PadTensors(),
    shuffle=True,
)
# Simulation time step (ms).
dt = 1
# Simulation length per sample is derived from each batch at runtime.
# (Named `t`, NOT `time`: the old variant's `time = None` shadowed the
# imported `time` module and broke `time.time()` below.)
t = None

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

sensor_size = tonic.datasets.DVSGesture.sensor_size  # (128, 128, 2) per results CSV
input_size = np.product(sensor_size)
n_classes = len(trainset.classes)

# Network hyperparameters.
exc: float = 22.5     # excitatory connection strength
inh: float = 120      # inhibitory connection strength
nu = [1e-4, 1e-2]     # (pre, post) learning rates
n_neurons = 1100      # 100 neurons per class for the 11 gesture classes
wmin = 0.0
wmax = 1.0
norm = input_size * 0.2
threshold = -52.0
plot = False
# Alternative architecture kept for experimentation:
# network = TwoLayerNetworkSimple(
#     n_inpt=input_size,
#     shape=(2, 128, 128),
#     n_neurons=n_neurons,
#     dt=dt,
#     wmin=wmin,
#     wmax=wmax,
#     nu=nu,
#     norm=norm,
#     reduction=None
# )

# DiehlAndCook2015 network: input layer "X" feeding an excitatory layer "Y"
# with lateral inhibition; adaptive thresholds via theta_plus / tc_theta_decay.
network = DiehlAndCook2015(
    n_inpt=input_size,
    inpt_shape=(2, 128, 128),
    n_neurons=n_neurons,
    dt=dt,
    wmin=wmin,
    wmax=wmax,
    nu=nu,
    norm=norm,
    inh=inh,
    exc=exc,
    theta_plus=0.05,       # threshold increment per output spike
    tc_theta_decay=1e7,    # threshold decay time constant
)

# Per-layer spike monitors are collected here (filled in below).
spikes = {}
# Attach a spike monitor to every layer and a voltage monitor to every
# non-input layer; time=None lets each monitor grow with every run() call.
for layer in set(network.layers):
    spikes[layer] = Monitor(
        network.layers[layer], state_vars=["s"], time=t, device=device
    )
    network.add_monitor(spikes[layer], name="%s_spikes" % layer)

voltages = {}
for layer in set(network.layers) - {"X"}:
    voltages[layer] = Monitor(
        network.layers[layer], state_vars=["v"], time=t, device=device
    )
    network.add_monitor(voltages[layer], name="%s_voltages" % layer)

network.to(device)

# Spike-based classifier state: every neuron starts unassigned (-1);
# per-neuron class rates/proportions start at zero.
assignments = -torch.ones(n_neurons, device=device)
proportions = torch.zeros((n_neurons, n_classes), device=device)
rates = torch.zeros((n_neurons, n_classes), device=device)
per_class = int(n_neurons / n_classes)

# Plot handles, lazily created by the bindsnet plotting helpers.
n_sqrt = int(np.ceil(np.sqrt(n_neurons)))
weights_im = None
assigns_im = None
perf_ax = None
inpt_axes = None
inpt_ims = None
spike_ims, spike_axes = None, None
voltage_axes, voltage_ims = None, None

# Accuracy history, one entry per epoch.
acc = {"train": [], "test": []}

start_time = time.time()
pbar = tqdm(total=epochs, colour='blue')
for epoch in range(epochs):
    # Reset the per-epoch counters here: the merged diff initialized them
    # once before the loop, which would accumulate hits across epochs while
    # still dividing by a single dataset length.
    correct_train = 0
    correct_test = 0

    # ---- Training ----
    network.train(mode=True)
    for events, targets in tqdm(
            train_loader, colour='green', leave=False, total=train_len):
        # events: (time, batch, ...) after PadTensors collation — TODO confirm.
        t = int(events.shape[0])
        input = {"X": events.to(device)}
        # Supervised teaching signal: clamp one randomly chosen neuron of the
        # target class's block of `per_class` neurons.
        choice = np.random.choice(
            int(n_neurons / n_classes), size=1, replace=False)
        clamp = {"Y": per_class * targets.long() + torch.Tensor(choice).long()}
        network.run(inputs=input, time=t, clamp=clamp)

        # Recorded output spikes reordered to (batch, time, neurons).
        spike_record = spikes["Y"].get("s").permute(1, 0, 2)
        all_activity_pred = all_activity(
            spikes=spike_record.to(device),
            assignments=assignments,
            n_labels=n_classes)
        proportion_pred = proportion_weighting(
            spikes=spike_record.to(device),
            assignments=assignments,
            proportions=proportions,
            n_labels=n_classes)
        correct_train += torch.sum(targets.long().to(device) ==
                                   all_activity_pred).item()
        # Re-estimate neuron -> label assignments from this batch's activity.
        assignments, proportions, rates = assign_labels(
            spikes=spike_record.to(device),
            labels=targets.to(device),
            n_labels=n_classes,
            rates=rates)

        if plot:
            input_exc_weights = network.connections[("X", "Y")].w
            square_weights = get_square_weights(
                weights=input_exc_weights.view(input_size, n_neurons),
                n_sqrt=n_sqrt, side=(128 * 2, 128))
            square_assignments = get_square_assignments(
                assignments=assignments, n_sqrt=n_sqrt)
            weights_im = plot_weights(square_weights, im=weights_im)
            assigns_im = plot_assignments(
                square_assignments, im=assigns_im, classes=trainset.classes)
            plt.pause(1e-24)

        network.reset_state_variables()
    acc["train"].append(correct_train / len(train_loader.dataset))

    # ---- Testing ----
    network.train(mode=False)
    for events, targets in tqdm(
            test_loader, leave=False, colour='red', total=test_len):
        t = int(events.shape[0])
        input = {"X": events.to(device)}
        network.run(inputs=input, time=t, input_time_dim=1)
        spike_record = spikes["Y"].get("s").permute(1, 0, 2)
        all_activity_pred = all_activity(
            spikes=spike_record.to(device),
            assignments=assignments,
            n_labels=n_classes)
        proportion_pred = proportion_weighting(
            spikes=spike_record.to(device),
            assignments=assignments,
            proportions=proportions,
            n_labels=n_classes)
        correct_test += torch.sum(targets.long().to(device) ==
                                  all_activity_pred).item()
        network.reset_state_variables()
    acc["test"].append(correct_test / len(test_loader.dataset))

    pbar.update()
runtime = time.time() - start_time
date = datetime.datetime.now()

filetype = '.png'
# Timestamp string "D_M_YYYY_HH:MM:SS" (sub-second part stripped by the split).
d = str(date.day) + "_" + str(date.month) + "_" + \
    str(date.year) + "_" + str(date.time())
d = d.split('.')[0]
scriptname = sys.argv[0].split('.')[0]

# One line per transform in the composed pipeline (drops the leading
# "Compose(" and trailing ")" lines of its repr).
tf_str = str(transform).replace(" ", "").splitlines(keepends=False)[1:-1]

plot_path = "./plots/" + scriptname + "/acc" + "_" + d + filetype
np.save("./plots/" + scriptname + "/acc_data" +
        "_" + d + '.npy', [acc["train"], acc["test"]])
final_acc = {"train": acc["train"], "test": acc["test"]}
# Class name of the network, e.g. "DiehlAndCook2015", taken from its repr.
network_str = str(network).replace("(", " ").split(' ')[0]
plot_performance(performances=final_acc, save=plot_path)

filename_raw = 'dvsgesture_results_raw.csv'
filename_pretty = 'dvsgesture_results_pretty.csv'
fields = ['n',
          ' date',
          ' filename',
          ' device',
          ' runtime',
          ' accuracy (train, test)',
          ' samples (train, test)',
          ' network',
          ' epochs',
          ' batch_size',
          ' nu',
          ' layers',
          ' exc',
          ' inh',
          ' wmin',
          ' wmax',
          ' norm',
          ' transform']

# Next row index = existing data rows (line count minus the header line).
# `with` closes the handle (the old bare `open` in the loop leaked it), and a
# missing file on the first run simply yields index 0 instead of crashing.
num_rows = 0
try:
    with open(filename_raw) as f:
        for _ in f:
            num_rows += 1
except FileNotFoundError:
    pass  # first run: the results file does not exist yet
if num_rows != 0:
    num_rows -= 1

rows = [[num_rows,
         d.replace('_', ' '),
         sys.argv[0].split('.')[0] + '.py',
         device,
         runtime,
         (acc["train"][-1], acc["test"][-1]),
         (len(train_loader.dataset), len(test_loader.dataset)),
         network_str,
         epochs,
         BATCH_SIZE,
         nu,
         n_neurons,
         exc,
         inh,
         wmin,
         wmax,
         norm,
         tf_str]]
with open(filename_raw, 'a') as csvfile: