Commit b2ed3307 authored by Johann Schröder's avatar Johann Schröder
Browse files

added csv, npy and plot export

parent c958c92b
bindsnet/data/
norse/data/
slayer/data/
snntorch/data/
slayer/slayerPytorch
frameworks/
data
dataset
.vscode
bindsnet/__pycache__
\ No newline at end of file
This diff is collapsed.
import gc
from bindsnet.analysis.plotting import (plot_assignments, plot_performance,
plot_spikes,
plot_voltages,
plot_weights)
from custom_models import (DiehlAndCook2015,
TwoLayerNetwork,
TwoLayerNetworkIzhikevich)
from bindsnet.utils import (get_square_assignments,
get_square_weights)
from numpy.lib.utils import source
import tonic
import torch
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from bindsnet.evaluation import (all_activity,
assign_labels,
proportion_weighting)
from bindsnet.network.monitors import Monitor
# Use the "science" matplotlib style for publication-style figures
# (provided by the scienceplots package).
plt.style.use('science')
# Free Python-side garbage and release cached GPU memory before building
# the datasets and network, so the run starts from a clean memory state.
gc.collect()
torch.cuda.empty_cache()
# Event-stream preprocessing for DVSGesture: scale timestamps down by 1000x,
# then rasterize each sample's events into 3 dense frames.
transform = tonic.transforms.Compose(
    [
        # tonic.transforms.Denoise(filter_time=10000),
        tonic.transforms.Downsample(time_factor=0.001),
        # ToFrame needs the sensor geometry to rasterize events into frames;
        # pass it explicitly, matching the NMNIST script below.
        tonic.transforms.ToFrame(sensor_size=tonic.datasets.DVSGesture.sensor_size,
                                 n_time_bins=3,
                                 include_incomplete=True),
    ]
)
BATCH_SIZE = 32

trainset = tonic.datasets.DVSGesture(save_to='../../data',
                                     transform=transform,
                                     train=True)
testset = tonic.datasets.DVSGesture(save_to='../../data',
                                    transform=transform,
                                    train=False)

# train samples: 1077, test samples: 264
train_subset_len = 1077
test_subset_len = 264

# Draw random subsets of the requested sizes (here equal to the full sets,
# so the split keeps every sample).
train_subset = torch.utils.data.random_split(
    trainset, [train_subset_len, len(trainset) - train_subset_len])[0]
test_subset = torch.utils.data.random_split(
    testset, [test_subset_len, len(testset) - test_subset_len])[0]

# Dataloader
# FIX: iterate the subsets (not the full datasets) so the train_len/test_len
# batch counts below match what the loaders actually yield — this mirrors
# the NMNIST script later in this file, which feeds the subsets.
train_loader = torch.utils.data.DataLoader(
    dataset=train_subset,
    batch_size=BATCH_SIZE,
    collate_fn=tonic.collation.PadTensors(),
    shuffle=True,
)
test_loader = torch.utils.data.DataLoader(
    dataset=test_subset,
    batch_size=BATCH_SIZE,
    collate_fn=tonic.collation.PadTensors(),
    shuffle=True
)

# Expected number of batches per epoch (float; only used for tqdm totals).
train_len = train_subset_len / BATCH_SIZE
test_len = test_subset_len / BATCH_SIZE
dt = 1        # simulation timestep
time = None   # run length; derived per-sample inside run_net
# Prefer the GPU but fall back to CPU so the script still starts without CUDA.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
sensor_size = tonic.datasets.DVSGesture.sensor_size
# np.product is deprecated (removed in NumPy 2.0); np.prod is the supported
# spelling and returns the same value.
input_size = np.prod(sensor_size)
n_classes = len(trainset.classes)
exc: float = 22.5
inh: float = 120
nu = [1e-4, 1e-2]        # (pre, post) STDP learning rates
n_neurons = 11**2        # output layer size (11x11 grid)
wmin = 0.0
wmax = 1.0
norm = input_size*0.2    # weight-normalization constant per output neuron
epochs = 1
threshold = -52.0        # output-neuron firing threshold
plot = False             # live plotting disabled for this run
# Diehl & Cook (2015)-style network with excitatory/inhibitory output layers.
DiehlCook2015 = DiehlAndCook2015(
    n_inpt=input_size,
    n_neurons=n_neurons,
    dt=dt,
    wmin=wmin,
    wmax=wmax,
    nu=nu,
    norm=norm,
    inh=inh,
    exc=exc
)
# Two-layer network with LIF output neurons (the one actually trained below).
TwoLayerLIF = TwoLayerNetwork(
    n_inpt=input_size,
    n_neurons=n_neurons,
    dt=dt,
    wmin=wmin,
    wmax=wmax,
    nu=nu,
    norm=norm,
    threshold=threshold,
    batch_size=BATCH_SIZE,
    reduction=None
)
# Two-layer network with Izhikevich output neurons (alternative model).
TwoLayerIzhikevich = TwoLayerNetworkIzhikevich(
    n_inpt=input_size,
    n_neurons=n_neurons,
    dt=dt,
    wmin=wmin,
    wmax=wmax,
    nu=nu,
    norm=norm
)
def run_net(epochs,
            batch_size,
            device,
            network,
            train_loader,
            test_loader,
            n_neurons,
            n_classes,
            time):
    """Train and test a two-layer bindsnet SNN on the event dataloaders.

    Training is unsupervised STDP with one randomly chosen output neuron of
    the target class clamped per sample; classification uses bindsnet's
    activity-based label assignment. Returns (acc_train, acc_test): running
    mean train/test accuracies, one entry appended per epoch.

    NOTE(review): relies on module-level globals (train_len, test_len, plot,
    dt, input_size, sensor_size, trainset) in addition to its parameters.
    """
    # Record input-layer spikes and output-layer spikes + voltages.
    source_monitor = Monitor(
        obj=network.layers["X"],
        state_vars=["s"],
        time=time,
        device=device
    )
    target_monitor = Monitor(
        obj=network.layers["Y"],
        state_vars=["s", "v"],
        time=time,
        device=device
    )
    network.add_monitor(monitor=source_monitor, name="X")
    network.add_monitor(monitor=target_monitor, name="Y")
    network.to(device)
    # Neuron->class assignments (-1 == unassigned) plus per-class spike
    # proportions and rates, updated online by assign_labels().
    assignments = -torch.ones_like(torch.Tensor(n_neurons), device=device)
    proportions = torch.zeros_like(torch.Tensor(n_neurons, n_classes), device=device)
    rates = torch.zeros_like(torch.Tensor(n_neurons, n_classes), device=device)
    per_class = int(n_neurons / n_classes)  # output neurons reserved per class
    accuracy_train = {"all": [], "proportion": []}
    acc_batch_train = []
    acc_batch_test = []
    acc_train = []
    acc_test = []
    n_sqrt = int(np.ceil(np.sqrt(n_neurons)))  # grid side for weight plots
    weights_im = None
    assigns_im = None
    perf_ax = None
    inpt_axes = None
    inpt_ims = None
    acc_print = {"train": [], "test": []}
    pbar = tqdm(total=epochs, colour='blue')
    network.train(mode=True)
    for epoch in range(epochs):
        #Training
        for events, targets in tqdm(
                train_loader, colour='blue', leave=False, total=int(train_len)):
            # NOTE(review): events is indexed as (batch, time, ...) here,
            # while the NMNIST variant of this script permutes from
            # (time, batch, ...) first — confirm the collate layout.
            time = int(events.shape[1])
            b = int(events.shape[0])
            labels = torch.empty(b, device=device)
            spike_record = torch.zeros(b, time, n_neurons)
            for idx, batch in enumerate(tqdm(events, colour='green', leave=False, total=b)):
                # Evaluate the current spike_record/labels BEFORE running the
                # sample (evaluate-then-train pattern), then refresh the
                # per-neuron label assignments.
                all_activity_pred = all_activity(
                    spikes=spike_record.to(device),
                    assignments=assignments,
                    n_labels=n_classes)
                proportion_pred = proportion_weighting(
                    spikes=spike_record.to(device),
                    assignments=assignments,
                    proportions=proportions,
                    n_labels=n_classes)
                accuracy_train["all"].append(
                    100 * torch.sum(labels.long().to(device) ==
                                    all_activity_pred).item() / b)
                assignments, proportions, rates = assign_labels(
                    spikes=spike_record.to(device),
                    labels=labels.to(device),
                    n_labels=n_classes,
                    rates=rates)
                # Flatten the event frames to (time, n_inputs) for the input layer.
                batch = batch.squeeze().view([time, -1]).to(device)
                input = {"X": batch}
                labels[idx] = targets[idx].item()
                # Clamp one randomly chosen output neuron within the target
                # class's block of per_class neurons.
                choice = np.random.choice(
                    int(n_neurons / n_classes), size=1, replace=False)
                clamp = {"Y": per_class *
                         targets[idx].long() + torch.Tensor(choice).long()}
                network.run(inputs=input, time=time, clamp=clamp)
                spikes = {"X": source_monitor.get("s"), "Y": target_monitor.get("s")}
                # NOTE(review): key says "X" but the value comes from the
                # target ("Y") monitor; `voltages` is unused — confirm intent.
                voltages = {"X": target_monitor.get("v")}
                spike_record[idx] = spikes["Y"].view(time, n_neurons)
                if plot:
                    inpt = input["X"].view(time, input_size).sum(
                        0).view(sensor_size)
                    input_exc_weights = network.connections[("X", "Y")].w
                    square_weights = get_square_weights(
                        weights=input_exc_weights.view(input_size, n_neurons), n_sqrt=n_sqrt, side=(128))
                    square_assignments = get_square_assignments(
                        assignments=assignments, n_sqrt=n_sqrt)
                    # Plots
                    # inpt_axes, inpt_ims = plot_input(
                    #     batch,
                    #     inpt,
                    #     label=labels[idx],
                    #     axes=inpt_axes,
                    #     ims=inpt_ims)
                    weights_im = plot_weights(square_weights, im=weights_im)
                    assigns_im = plot_assignments(
                        square_assignments, im=assigns_im, classes=trainset.classes)
                    # perf_ax = plot_performance(acc_print, x_scale=time, ax=perf_ax)
                    plt.pause(1e-20)
                # Clear network state between samples (no spike/voltage carry-over).
                network.reset_state_variables()
            acc_batch_train.append(accuracy_train["all"][-1])
        acc_train.append(np.mean(acc_batch_train))
        print("Train Mean Accuracy: %.2f " % np.mean(acc_train))
        #Testing
        network.train(mode=False)
        for events, targets in tqdm(
                test_loader, leave=False, colour='red', total=int(test_len)):
            b = int(events.shape[0])
            time = int(events.shape[1])
            spike_record = torch.zeros(1, int(time/dt), n_neurons, device=device)
            accuracy_test = {"all": 0, "proportion": 0}
            for idx, batch in enumerate(tqdm(events, leave=False, colour='green', total=b)):
                inputs = {"X": batch.view(int(time/dt), 1, input_size).to(device)}
                # NOTE(review): redundant .cuda() after .to(device); breaks on
                # CPU-only machines — confirm device handling.
                inputs = {k: v.cuda() for k, v in inputs.items()}
                network.run(inputs=inputs, time=time, input_time_dim=1)
                spikes = {"Y": target_monitor.get("s")}
                spike_record[0] = spikes["Y"].squeeze()
                label_tensor = torch.tensor(targets[idx], device=device)
                all_activity_pred = all_activity(
                    spikes=spike_record,
                    assignments=assignments,
                    n_labels=n_classes)
                proportion_pred = proportion_weighting(
                    spikes=spike_record,
                    assignments=assignments,
                    proportions=proportions,
                    n_labels=n_classes)
                accuracy_test["all"] += float(torch.sum(label_tensor.long()
                                                        == all_activity_pred).item())
                print(100*accuracy_test["all"]/b)
                network.reset_state_variables()
            acc_batch_test.append(100 * accuracy_test["all"]/b)
        acc_test.append(np.mean(acc_batch_test))
        print("Test Mean Accuracy: %.2f " % np.mean(acc_test))
        pbar.set_description_str("Train Mean Accuracy: %.2f, Test Mean Accuracy: %.2f " % (
            np.mean(acc_train), np.mean(acc_test)))
        pbar.update()
    return acc_train, acc_test
# Log the hyperparameters for this run.
print("Params: norm: [%.2f], nu: ([%f], [%f]), thres: [%.1fmV]" %
      (norm, nu[0], nu[1], threshold))
# Train/evaluate the LIF two-layer network and report the accuracy curves.
train_acc, test_acc = run_net(
    epochs=epochs,
    batch_size=BATCH_SIZE,
    device=device,
    network=TwoLayerLIF,
    train_loader=train_loader,
    test_loader=test_loader,
    n_neurons=n_neurons,
    n_classes=n_classes,
    time=time)
print(train_acc, test_acc)
final_acc = {"train": train_acc, "test": test_acc}
# NOTE(review): `ax` is passed the integer `epochs`; bindsnet's
# plot_performance expects a matplotlib Axes for `ax` — x_scale was
# probably intended. Confirm against the bindsnet version in use.
plot_performance(performances=final_acc, ax=epochs)
# fig = plt.figure(figsize=(10, 5))
# plt.plot(train_acc)
# plt.plot(test_acc)
# plt.title("Accuracy Curves")
# plt.legend(["Train Accuracy", "Test Accuracy"])
# plt.xlabel("Iteration")
# plt.ylabel("Accuracy")
# plt.show()
import gc
from bindsnet.analysis.plotting import (plot_assignments,
plot_performance,
plot_spikes,
plot_voltages,
plot_weights,
plot_input)
from bindsnet.evaluation import (all_activity,
assign_labels,
proportion_weighting)
from bindsnet.utils import get_square_assignments, get_square_weights
from custom_models import (DiehlAndCook2015,
TwoLayerNetwork,
TwoLayerNetworkIzhikevich, TwoLayerNetworkSimple)
from numpy.lib.utils import source
import tonic
import torch
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from bindsnet.network.monitors import Monitor
# Publication-style matplotlib figures (scienceplots package).
plt.style.use('science')
# Clear Python garbage and cached GPU memory before allocating datasets.
gc.collect()
torch.cuda.empty_cache()
# NMNIST preprocessing: scale timestamps down by 1000x, then rasterize the
# events into one frame per time window of 1 (downsampled) unit.
transform = tonic.transforms.Compose(
    [
        tonic.transforms.Downsample(time_factor=0.001),  # NMNIST
        # tonic.transforms.Denoise(filter_time=10000),
        tonic.transforms.ToFrame(sensor_size=tonic.datasets.NMNIST.sensor_size,
                                 time_window=1,
                                 include_incomplete=True)
    ]
)
epochs = 15
BATCH_SIZE = 128
#NMNIST Train Samples: 70000, Test Samples 10000
# Subset sizes actually used for training/testing (multiples of BATCH_SIZE).
train_subset_len = 1280
test_subset_len = 512
first_saccade_only = True  # keep only the first saccade of each recording
trainset = tonic.datasets.NMNIST(save_to='../data',
                                 transform=transform,
                                 first_saccade_only=first_saccade_only,
                                 train=True)
testset = tonic.datasets.NMNIST(save_to='../data',
                                transform=transform,
                                first_saccade_only=first_saccade_only,
                                train=False)
# Random subsets of the full datasets (only these are fed to the loaders).
train_subset = torch.utils.data.random_split(
    trainset, [train_subset_len, len(trainset)-train_subset_len])[0]
test_subset = torch.utils.data.random_split(
    testset, [test_subset_len, len(testset)-test_subset_len])[0]
# Expected number of batches per epoch (float; used only for tqdm totals).
train_len = train_subset_len/BATCH_SIZE
test_len = test_subset_len/BATCH_SIZE
#Dataloader
# PadTensors pads variable-length event tensors in a batch to a common
# number of frames before stacking.
train_loader = torch.utils.data.DataLoader(
    dataset=train_subset,
    batch_size=BATCH_SIZE,
    collate_fn=tonic.collation.PadTensors(),
    shuffle=True,
)
test_loader = torch.utils.data.DataLoader(
    dataset=test_subset,
    batch_size=BATCH_SIZE,
    collate_fn=tonic.collation.PadTensors(),
    shuffle=True
)
dt = 1        # simulation timestep
time = None   # run length; derived per-sample inside run_net
# Prefer the GPU but fall back to CPU so the script still starts without CUDA.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
sensor_size = tonic.datasets.NMNIST.sensor_size
# np.product is deprecated (removed in NumPy 2.0); use np.prod, and reuse
# the sensor_size variable instead of re-reading the class attribute.
input_size = np.prod(sensor_size)
n_classes = len(trainset.classes)
exc: float = 22.5
inh: float = 17.5
nu = [1e-4, 1e-2]        # (pre, post) STDP learning rates
n_neurons = 100
wmin = 0.0
wmax = 1.0
norm = input_size*0.15   # weight-normalization constant per output neuron
threshold = -52.0        # output-neuron firing threshold
plot = True              # live plotting enabled for this run
# Minimal two-layer network variant.
TwoLayerSimple = TwoLayerNetworkSimple(
    n_inpt=input_size,
    n_neurons=n_neurons,
    dt=dt,
    wmin=wmin,
    wmax=wmax,
    nu=nu,
    norm=norm,
    reduction=None
)
# Diehl & Cook (2015)-style network with excitatory/inhibitory output layers.
DiehlCook2015 = DiehlAndCook2015(
    n_inpt=input_size,
    n_neurons=n_neurons,
    dt=dt,
    wmin=wmin,
    wmax=wmax,
    nu=nu,
    norm=norm,
    inh=inh,
    exc=exc,
    reduction=None
)
# Two-layer network with LIF output neurons.
TwoLayerLIF = TwoLayerNetwork(
    n_inpt=input_size,
    n_neurons=n_neurons,
    dt=dt,
    wmin=wmin,
    wmax=wmax,
    nu=nu,
    norm=norm,
    reduction=None
)
# Two-layer network with Izhikevich output neurons.
TwoLayerIzhikevich = TwoLayerNetworkIzhikevich(
    n_inpt=input_size,
    n_neurons=n_neurons,
    dt=dt,
    wmin=wmin,
    wmax=wmax,
    nu=nu,
    norm=norm
)
def run_net(epochs,
batch_size,
device,
network,
train_loader,
test_loader,
n_neurons,
n_classes,
time):
source_monitor = Monitor(
obj=network.layers["X"],
state_vars=["s"],
time=time,
device=device
)
target_monitor = Monitor(
obj=network.layers["Y"],
state_vars=["s", "v"],
time=time,
device=device
)
network.add_monitor(monitor=source_monitor, name="X")
network.add_monitor(monitor=target_monitor, name="Y")
network.to(device)
assignments = -torch.ones_like(torch.Tensor(n_neurons), device=device)
proportions = torch.zeros_like(torch.Tensor(n_neurons, n_classes), device=device)
rates = torch.zeros_like(torch.Tensor(n_neurons, n_classes), device=device)
per_class = int(n_neurons / n_classes)
accuracy_train = {"all": [], "proportion": []}
acc_batch_train = []
acc_batch_test = []
acc_train = []
acc_test = []
n_sqrt = int(np.ceil(np.sqrt(n_neurons)))
weights_im = None
assigns_im = None
perf_ax = None
inpt_axes = None
inpt_ims = None
acc_print = {"train": [], "test": []}
pbar = tqdm(total=epochs, colour='blue')
for epoch in range(epochs):
network.train(mode=True)
#Training
for events, targets in tqdm(train_loader, colour='blue', leave=False, total=int(train_len)):
events = events.permute([1, 0, 2, 3, 4])
time = int(events.shape[1])
b = int(events.shape[0])
labels = torch.empty(b, device=device)
spike_record = torch.zeros(b, time, n_neurons)
for idx, batch in enumerate(tqdm(events, colour='green', leave=False, total=b)):
all_activity_pred = all_activity(
spikes=spike_record.to(device),
assignments=assignments,
n_labels=n_classes)
proportion_pred = proportion_weighting(
spikes=spike_record.to(device),
assignments=assignments,
proportions=proportions,
n_labels=n_classes)
accuracy_train["all"].append(
100 * torch.sum(labels.long().to(device) ==
all_activity_pred).item() / b)
# print(
# "\nAll activity accuracy: %.2f (last), %.2f (average), %.2f (best)"
# % (accuracy_train["all"][-1], np.mean(accuracy_train["all"]), np.max(accuracy_train["all"]))
# )
assignments, proportions, rates = assign_labels(
spikes=spike_record.to(device),
labels=labels.to(device),
n_labels=n_classes,
rates=rates)
batch = batch.view([time, -1]).to(device)
input = {"X": batch}
labels[idx] = targets[idx].item()
choice = np.random.choice(
int(n_neurons / n_classes), size=1, replace=False)
clamp = {"Y": per_class *
targets[idx].long() + torch.Tensor(choice).long()}
network.run(inputs=input, time=time, clamp=clamp)
spikes = {"X": source_monitor.get("s"), "Y": target_monitor.get("s")}
voltages = {"X": target_monitor.get("v")}
spike_record[idx] = spikes["Y"].view(time, n_neurons)
if plot:
inpt = input["X"].view(time, input_size).sum(
0).view(sensor_size)
input_exc_weights = network.connections[("X", "Y")].w
square_weights = get_square_weights(
weights=input_exc_weights.view(input_size, n_neurons), n_sqrt=n_sqrt, side=(68, 34))
square_assignments = get_square_assignments(
assignments=assignments, n_sqrt=n_sqrt)
# Plots
# inpt_axes, inpt_ims = plot_input(
# batch,
# inpt,
# label=labels[idx],
# axes=inpt_axes,
# ims=inpt_ims)
weights_im = plot_weights(square_weights, im=weights_im)
assigns_im = plot_assignments(
square_assignments, im=assigns_im, classes=trainset.classes)
# perf_ax = plot_performance(acc_print, x_scale=time, ax=perf_ax)
plt.pause(1e-20)
network.reset_state_variables()
acc_batch_train.append(accuracy_train["all"][-1])
acc_train.append(np.mean(acc_batch_train))
print("Train Mean Accuracy: %.2f " % np.mean(acc_train))
#Testing
network.train(mode=False)
for events, targets in tqdm(test_loader, leave=False, colour='red', total=int(test_len)):
events = events.permute([1, 0, 2, 3, 4])
b = int(events.shape[0])
time = int(events.shape[1])
spike_record = torch.zeros(1, int(time/dt), n_neurons, device=device)
accuracy_test = {"all": 0, "proportion": 0}
for idx, batch in enumerate(tqdm(events, leave=False, colour='green', total=b)):
inputs = {"X": batch.view(int(time/dt), 1, input_size).to(device)}