Commit a8000170 authored by Olivier Bertrand
Add 'navipy/forecasting/' from commit '18102ce6'

git-subtree-dir: navipy/forecasting
git-subtree-mainline: 5ad952a7
git-subtree-split: 18102ce6
parents 5ad952a7 18102ce6
Showing with 48709 additions and 0 deletions
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Copyright (c) 2013 R. F. Reinhart, CoR-Lab %%%
%%% Bielefeld University, Germany, http://cor-lab.de %%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Modified by Alexander Schulz and Benjamin Paaßen in 2017
% ( {aschulz,bpaassen}@techfak.uni-bielefeld.de)
classdef ESN < Learner
%ESN Echo State Network learner with a ridge-regression readout.
% The reservoir is either a standard random network or Tino's 'Cycle
% Reservoir with Jumps' (CRJ); activations can be linear, tanh or ReLU,
% optionally with leaky integration (dt < 1). Training works in batch mode,
% per sequence (cell-array input), or online via the accumulated
% H'*H and H'*Y statistics.
%
properties
hidDim = 10; % number of neurons in the hidden layer
wInp = []; % weights from input layer to hidden layer
wRes = []; % weights from hidden layer to hidden layer
wOut = []; % weights from hidden layer to output layer
bias = []; % bias parameters for activation function
a = []; % activation of the hidden layer
%aNew = []; % temporal activation, weighted with dt and added to old activation
h = []; % set by the act function
h_old = [];
y = []; % is this really needed?
h_init = []; % initial hidden state. used if use_init_h == 1
use_init_h = 0; % variable indicating whether after each
% subsequence the initial state should be
% resumed
collectH = 0; % specifies if the hidden states should be saved
Hcoll = []; % memory for saving hidden states
% statistics for incremental learning
HTH = []; % statistic to store H' * H
HTY = []; % statistic to store H' * Y
universRes = 1; % use Tino's 'Cycle Reservoir with Jumps' (CRJ)
% or a standard random reservoir
universResPars =... % params for the CRJ reservoir: the absolute
0.1 * [1 1 1 50]; % magnitude of input weights, the circular and
% jump connection strengths r_c and r_j and the
% jump distance between neurons
linearESN = 0; % use a linear ESN, i.e. no sigmoid functions
useReLU = 0; % use tanh or a ReLU activation function
psdRes = 0; % use a positive semi-definite (psd) reservoir; possibly a good fit for ReLUs
dt = 1; % factor for weighting the contribution of the new reservoir activation
inpScale = 1; % scaling for input weights
biasScale = 0; % scaling for bias
inpDensity = 1; % density of input weights matrix
resDensity = 0.2; % density of reservoir weights matrix
reg = 1e-6; % regularization parameter for ridge regression
specRad = 0.95; % spectral radius for the reservoir weights matrix
woSteps = 200; % washout steps applied to the beginning of each sequence
convergeSteps = 0; % if >0 the net is fed with the first data vector convergeSteps times before train/apply
%batchSize = 1000; % number of samples used for a minibatch
end
methods
function l = ESN(inpDim, outDim, spec)
l = l@Learner(inpDim, outDim, spec);
end
function updateFromJSON(l, json)
fields = fieldnames(json.esn);
for f=1:numel(fields)
l.(fields{f}) = json.esn.(fields{f});
end
end
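% A hypothetical usage sketch for updateFromJSON; the fields of the "esn"
% object must match ESN property names (loadjson is provided by jsonlab,
% the file name below is made up):
%   json = loadjson('esn_config.json');   % e.g. {"esn": {"hidDim": 100, "specRad": 0.9}}
%   l.updateFromJSON(json);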
function init(l, X)
if l.universRes == 0
% use a random reservoir
if l.inpDensity == 1
l.wInp = l.inpScale * (2 * rand(l.hidDim,l.inpDim) - ones(l.hidDim,l.inpDim));
else
l.wInp = createSparseMatrix(l.hidDim, l.inpDim, l.inpDensity, l.inpScale);
end
%l.wRes = 2 * rand(l.hidDim,l.hidDim) - ones(l.hidDim,l.hidDim);
if (l.resDensity > 0)
if ~l.psdRes
l.wRes = createSparseMatrix(l.hidDim, l.hidDim, l.resDensity, 1);
else
useVariant = 3;
if useVariant == 1
% variant 1: compute a scalar product.
% It is not sparse
l.wRes = createSparseMatrix(l.hidDim, l.hidDim, l.resDensity, 1);
% use psd matrices with ReLU activations
l.wRes = l.wRes * l.wRes' + eye(l.hidDim);
elseif useVariant == 2
% variant 2: use sprandsym. Quote from the doc:
% It has a great deal of topological and
% algebraic structure.
l.wRes = sprandsym(l.hidDim, l.resDensity, (1+rand(l.hidDim,1))/2);
elseif useVariant == 3
% variant 3: set symmetric random values
% there are l.hidDim*(l.hidDim-1)/2 elements in the
% upper triangle of a matrix.
% set random values for off diagonal elements
randPos = randperm(round(l.hidDim*(l.hidDim-1)/2));
nonZeroE = round(l.resDensity*l.hidDim*(l.hidDim-1)/2);
randVals1 = zeros(round(l.hidDim*(l.hidDim-1)/2),1);
randVals1(randPos(1:nonZeroE)) = randu(nonZeroE,1);
% set random values for the diagonal elements
randPos = randperm(l.hidDim);
nonZeroE = round(l.resDensity*l.hidDim);
randVals2 = zeros(l.hidDim,1);
randVals2(randPos(1:nonZeroE)) = randu(nonZeroE,1);
l.wRes = sparse(squareform(randVals1) + diag(randVals2));
l.wRes(1:l.hidDim+1:end) = l.wRes(1:l.hidDim+1:end) ...
- eigs(l.wRes, 1,'sa');
end
end
% old scaling
%sr = max(abs(eigs(l.wRes)));
%l.wRes = (l.specRad / sr) .* l.wRes;
% use the effective spectral radius for leaky integrator
sr = max(abs(eigs(l.dt*l.wRes + (1-l.dt)*eye(l.hidDim))));
l.wRes = ((l.specRad -(1-l.dt)) / (sr-(1-l.dt))) .* l.wRes;
else
l.wRes = zeros(l.hidDim);
end
else
% use Peter Tino's CRJ
% set the input weights
currPath = mfilename('fullpath');
currPath = currPath(1:end-3);
piDecExpan = loadjson([currPath 'pi_8000.json']);
piDecExpan = piDecExpan.pi8000(1:l.hidDim*l.inpDim);
tmpInp = (-1) * l.universResPars(1) * (piDecExpan < 4.5);
tmpInp = tmpInp + l.universResPars(1) * (piDecExpan > 4.5);
l.wInp = reshape(tmpInp, l.hidDim, l.inpDim);
clear tmpInp piDecExpan;
% set the reservoir weights
l.wRes = spalloc(l.hidDim, l.hidDim, ...
l.hidDim + 2*floor(l.hidDim/l.universResPars(4)));
% set the circular connection weights
circ_idx = 2:l.hidDim+1:l.hidDim^2;
l.wRes(circ_idx) = l.universResPars(2);
l.wRes(1, l.hidDim) = l.universResPars(2);
% set the jump weights
nextToLast = (floor(l.hidDim/l.universResPars(4))-2)*l.universResPars(4) + 1;
jump_idx = (1:l.universResPars(4):nextToLast)';
jump_idx = [jump_idx+l.universResPars(4) jump_idx];
if mod(l.hidDim, l.universResPars(4)) == 0
jump_idx(end+1, :) = [1 jump_idx(end,1)];
else
jump_idx(end+1, :) = [jump_idx(end,1)+l.universResPars(4) jump_idx(end,1)];
end
jump_idx1 = (jump_idx(:,2)-1)*l.hidDim + jump_idx(:,1);
jump_idx2 = (jump_idx(:,1)-1)*l.hidDim + jump_idx(:,2);
l.wRes(jump_idx1) = l.universResPars(3);
l.wRes(jump_idx2) = l.universResPars(3);
clear circ_idx jump_idx jump_idx1 jump_idx2 nextToLast
end
l.wOut = zeros(l.hidDim, l.outDim);
l.bias = l.biasScale * (2 * rand(l.hidDim,1) - ones(l.hidDim,1));
l.h = zeros(l.hidDim,1);
l.h_old = zeros(l.hidDim,1);
l.a = zeros(size(l.h));
%l.aNew = zeros(size(l.h));
l.y = zeros(1,l.outDim);
%not required
%if nargin > 1
% l.normalizeIO(X);
%end
end
function train(l, X, Y)
if iscell(X)
% check if the input normalization is already set
if isempty(l.inpOffset)
l.normalizeIO(cell2mat(X), cell2mat(Y));
end
l.HTH = zeros(l.hidDim, l.hidDim);
l.HTY = zeros(l.hidDim, l.outDim);
numSequences = length(X);
for i=1:numSequences
X{i} = range2norm(X{i}, l.inpRange, l.inpOffset);
Y{i} = range2norm(Y{i}, l.outRange, l.outOffset);
if l.convergeSteps > 0
convergenceInput = repmat(X{i}(1,:), l.convergeSteps, 1);
l.calcHiddenStates(convergenceInput);
end
if l.use_init_h
l.h = l.h_init;
end
H = l.calcHiddenStates(X{i});
if l.woSteps > size(X{i},1)
warning(['washout longer than sequence ' num2str(i)]);
return;
end
H = H(l.woSteps+1:end,:);
l.HTH = l.HTH + H' * H;
l.HTY = l.HTY + H' * Y{i}(l.woSteps+1:end,:);
end
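% Ridge regression on the accumulated statistics:
%   wOut = (H'*H + reg*I) \ (H'*Y)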
HTH = l.HTH + diag(repmat(l.reg, 1, l.hidDim));
l.wOut = HTH\l.HTY;
else
% check if the input normalization is already set
if isempty(l.inpOffset)
[X, Y] = l.normalizeIO(X, Y);
else
if l.inpNormalization
X = range2norm(X, l.inpRange, l.inpOffset);
end
if l.outNormalization
Y = range2norm(Y, l.outRange, l.outOffset);
end
end
if l.convergeSteps > 0
convergenceInput = repmat(X(1,:), l.convergeSteps, 1);
l.calcHiddenStates(convergenceInput);
end
if l.use_init_h
l.h = l.h_init;
end
numSamples = size(X,1);
H = l.calcHiddenStates(X);
if l.woSteps > numSamples
warning('washout longer than sequence');
return;
end
Y = Y(l.woSteps+1:end,:);
H = H(l.woSteps+1:end,:);
l.HTH = H' * H;
l.HTY = H' * Y;
l.wOut = (l.HTH + l.reg * eye(l.hidDim))\(l.HTY);
end
end
% This function implements an online least-squares update.
% Before the first call, a manual washout should be performed by
% executing
%   l.calcHiddenStates(X);
% on at least woSteps samples.
% No forgetting factor is currently implemented.
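% A minimal usage sketch (hypothetical variable names; assumes the
% normalization constants were already set via normalizeIO):
%   Xn = range2norm(X, esn.inpRange, esn.inpOffset);   % same scaling train_online applies
%   esn.calcHiddenStates(Xn(1:esn.woSteps, :));        % manual washout
%   esn.train_online(X(esn.woSteps+1:end, :), Y(esn.woSteps+1:end, :));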
function train_online(l, X, Y)
if isempty(l.HTH) || isempty(l.HTY)
% if no prior training has been performed (either batch or
% online): initialize statistics with zeros
l.HTH = zeros(l.hidDim, l.hidDim);
l.HTY = zeros(l.hidDim, l.outDim);
end
if l.inpNormalization
X = range2norm(X, l.inpRange, l.inpOffset);
end
if l.outNormalization
Y = range2norm(Y, l.outRange, l.outOffset);
end
if l.convergeSteps > 0
convergenceInput = repmat(X(1,:), l.convergeSteps, 1);
l.calcHiddenStates(convergenceInput);
end
% update the statistics
H = l.calcHiddenStates(X);
l.HTH = l.HTH + H' * H;
l.HTY = l.HTY + H' * Y;
% compute the new weights
l.wOut = (l.HTH + l.reg * eye(l.hidDim))\(l.HTY);
%l.wOut = l.HTH\(l.HTY);
end
function [Y] = apply(l, X)
if iscell(X)
Y = cell(length(X),1);
for i=1:length(X)
Y{i} = l.apply(X{i});
end
else
if (l.inpNormalization)
X = range2norm(X, l.inpRange, l.inpOffset);
end
%l.calcHiddenStates(repmat(X(1,:), l.woSteps, 1)); %washout transients
if l.convergeSteps > 0
convergenceInput = repmat(X(1,:), l.convergeSteps, 1);
l.calcHiddenStates(convergenceInput);
end
if l.use_init_h
l.h = l.h_init;
end
H = l.calcHiddenStates(X);
if l.collectH
l.Hcoll = H;
end
Y = H * l.wOut;
if (l.outNormalization)
Y = norm2range(Y, l.outRange, l.outOffset);
end
l.out = Y(end,:);
end
end
% input to calcHiddenStates is always one sequence (steps x dim)
function [H] = calcHiddenStates(l, X)
numSamples = size(X,1);
H = zeros(numSamples, l.hidDim);
for k=1:numSamples
l.act(X(k,:));
H(k,:) = l.h;
end
end
function act(l, x)
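% Leaky-integrator state update implemented below:
%   a(t) = wInp * x(t)' + wRes * h(t-1) + bias
%   h(t) = (1 - dt) * h(t-1) + dt * f(a(t)),  with f = identity, ReLU or tanh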
l.a = l.wInp * x' + l.wRes * l.h + l.bias;
% Hx1 = HxI * (1xI)' + HxH * Hx1 + Hx1
if l.linearESN
l.h = l.a;
else
% nonlinear activation function
if l.useReLU
l.h = l.a;
l.h(l.h<0) = 0;
else
l.h = tanh(l.a);
end
end
if l.dt ~= 1
l.h = (1-l.dt) * l.h_old + l.dt * l.h;
l.h_old = l.h;
end
end
% If this method is used, the data normalization constants should
% be set in advance. Use
% l.normalizeIO(X, Y);
% for this purpose.
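% A minimal usage sketch (hypothetical data X_tr, Y_tr, X_init):
%   esn.normalizeIO(cell2mat(X_tr), cell2mat(Y_tr));  % set scaling constants first
%   esn.set_initState(X_init);                        % X_init: steps x inpDim
%   esn.use_init_h = 1;                               % resume this state for each sequence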
function set_initState(l, X)
if l.inpNormalization
X = range2norm(X, l.inpRange, l.inpOffset);
end
H = l.calcHiddenStates(X);
l.h_init = H(end,:)';
end
% Export a trained reservoir to JSON.
% str specifies the name of the output file.
% If mode == 1, only the output weights are exported;
% if mode == 2, the full reservoir description is exported.
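% Example (as used in the demo scripts below):
%   esn.export('out.json', 1);         % output weights only
%   esn.export('reservoir.json', 2);   % full reservoir description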
function export(l, str, mode)
if nargin < 3
mode = 1;
end
if mode == 1
savejson('esn', l.wOut', str);
elseif mode == 2
% select the properties to be stored
res.inpDim = l.inpDim;
res.hidDim = l.hidDim;
res.outDim = l.outDim;
res.wInp = l.wInp;
res.inpOffset = l.inpOffset;
res.inpRange = l.inpRange;
res.bias = l.bias;
res.dt = l.dt;
res.universRes = l.universRes;
res.wRes = l.wRes;
res.outOffset = l.outOffset;
res.outRange = l.outRange;
savejson('esn', res, str);
end
end
function reset(l)
l.h = zeros(l.hidDim,1);
l.h_old = zeros(l.hidDim,1);
l.a = zeros(size(l.h));
%l.aNew = zeros(size(l.h));
l.y = zeros(1,l.outDim);
end
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Copyright (c) 2013 R. F. Reinhart, CoR-Lab %%%
%%% Bielefeld University, Germany, http://cor-lab.de %%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
classdef Learner < handle
%LEARNER Interface class for function approximator models.
% Detailed explanation goes here
properties
inpDim = 0; %dimension of input
outDim = 0; %dimension of output
out = [];
inpNormalization = 1; % (de)activates normalization of the input
outNormalization = 1; % (de)activates normalization of the output
inpOffset = [];
inpRange = [];
outOffset = [];
outRange = [];
end
methods
function l = Learner(inpDim, outDim, spec)
l.inpDim = inpDim;
l.outDim = outDim;
l.out = zeros(1,l.outDim);
if nargin > 2
for s=1:size(spec,1)
if ~strcmp(spec{s,1}, 'class')
try
l.(spec{s,1}) = spec{s,2};
catch me
disp(me.message);
end
end
end
end
end
function [X Y] = normalizeIO(l, X, Y)
% default values for already normalized data
l.inpOffset = zeros(1, l.inpDim);
l.outOffset = zeros(1, l.outDim);
l.inpRange = repmat([-1; 1], 1, l.inpDim);
l.outRange = repmat([-1; 1], 1, l.outDim);
if l.inpNormalization && nargin > 1
[X l.inpOffset l.inpRange] = normalize(X);
end
if l.outNormalization && nargin > 2
[Y l.outOffset l.outRange] = normalize(Y);
end
end
function init(l, X, Y)
end
% expect X and Y to be either matrices or cell-arrays of matrices
function train(l, X, Y)
end
function Y = apply(l, X)
end
function new = copy(this)
new = feval(class(this));
p = properties(this);
for i = 1:length(p)
if( isa(this.(p{i}), 'handle'))
new.(p{i}) = this.(p{i}).copy();
elseif isa(this.(p{i}),'cell')
new.(p{i}) = deepCopyCellArray(numel(this.(p{i})));
else
new.(p{i}) = this.(p{i});
end
end
end
end
end
function P = chud_pi(d)
% CHUD_PI Chudnovsky algorithm for pi.
% chud_pi(d) produces d decimal digits.
%
% From Mathworks:
% https://de.mathworks.com/company/newsletters/articles/computing-pi.html
k = sym(0);
s = sym(0);
sig = sym(1);
n = ceil(d/14);
for j = 1:n
s = s + sig * prod(3*k+1:6*k)/prod(1:k)^3 * ...
(13591409+545140134*k) / 640320^(3*k+3/2);
k = k+1;
sig = -sig;
end
S = 1/(12*s);
P = vpa(S,d);
% create pi to a certain precision and write it to a file
% compute Pi for a reservoir of size 1000 with 8 inputs
num_digits = 8*1000;
p = chud_pi(num_digits);
% convert the sym number to a string
piStr = char(p);
% represent each digit as an entry in a vector
piVec = zeros(numel(piStr)-1,1);
piVec(1) = 3;
for i = 3:numel(piStr)
piVec(i-1) = str2double(piStr(i));
end
% write it to a file
savejson('pi8000', piVec,'pi_8000.json');
% --- don't use the csv format for strings. they do not load
% csvwrite(sprintf('pi_%i.csv', num_digits), piStr);
function [ M ] = createSparseMatrix(rows, cols, density, range)
%CREATESPARSEMATRIX Create a rows x cols sparse matrix in which roughly
%   density*rows*cols entries are nonzero, drawn uniformly from [-range, range].
if density == 1.0
M = randu(rows,cols,range);
return;
end
nonzero = max(0, min(round(density * rows * cols), rows * cols));
M = zeros(rows, cols);
p = randperm(rows * cols);
for i=1:nonzero
row = ceil(p(i)/cols);
col = mod(p(i), cols) + 1;
M(row, col) = randu(1,1,range);
end
M = sparse(M);
end
% test ESN on the adding problem ... ESNs do not work well here
% generate the data
seqL = 40;
numS_tr = 10000;
numS_te = 1000;
X = cell(numS_tr+numS_te,1);
Y = cell(numS_tr+numS_te,1);
for s = 1:(numS_tr+numS_te)
X{s} = rand(seqL,1);
X{s} = [X{s}, zeros(seqL,1)];
% select two random entries
randIdx = randperm(seqL);
randIdx = randIdx(1:2);
X{s}(randIdx,2) = 1;
Y{s} = repmat(sum(X{s}(randIdx,1)), seqL, 1);
end
X_tr = X(1:numS_tr);
X_te = X(numS_tr+1:numS_tr+numS_te);
Y_tr = Y(1:numS_tr);
Y_te = Y(numS_tr+1:numS_tr+numS_te);
%% apply the ESN model
resS = 200;
reg = 0.01;
woSteps = 1; % don't do washout since the important position can be anywhere
dimIn = 2;
dimOut = 1;
dtCons = 0.4;
specRad = 0.9;
univRes = 0;
linESN = 0;
useRelu = 0;
lspecs = {'class','ESN'; 'hidDim',resS; 'reg',reg; ...
'woSteps',woSteps; 'inpDim',dimIn; 'outDim',dimOut; ...
'dt', dtCons; 'specRad', specRad; 'universRes', univRes; ...
'linearESN', linESN; 'useReLU',useRelu; 'use_init_h', 1}; %; ...
%'universResPars', [v,rc,rj,jumps]; };
esn = ESN(dimIn, dimOut, lspecs);
esn.init();
% set the normalization constants
esn.normalizeIO(cell2mat(X_tr), cell2mat(Y_tr));
esn.set_initState([rand(seqL,1) zeros(seqL,1)]);
esn.train(X_tr, Y_tr);
preds_te = esn.apply(X_te);
result_vals = zeros(numS_te,2);
for i=1:numel(X_te)
result_vals(i,:) = [preds_te{i}(end) Y_te{i}(end)];
end
constMSE = mean((result_vals(:,2) - 1).^2)
esnMSE = mean((result_vals(:,2) - result_vals(:,1)).^2)
% demo memory capacity
% compute the memory capacity on a 1D signal as in
% Jaeger: Short term memory in echo state networks, 2002
%% generate the input
seqL_tr = 200;
seqL_te = 200;
seqL = seqL_tr + seqL_te;
nRep = 10;
Xtmp = 2*rand(round(seqL/nRep), 1) -1;
% copy each value 10 times to reproduce Jaeger's scenario
X = zeros(seqL,1);
count = 0;
for i=1:nRep:seqL
count = count+1;
X(i:i+nRep-1) = repmat(Xtmp(count), nRep, 1);
end
X_tr = X(1:seqL_tr);
X_te = X(seqL_tr+1:seqL_tr+seqL_te);
%% generate the target, i.e. delay the input
K = 40;
Y = zeros(seqL,K);
for k=1:K
Y(1:k,k) = 2*rand(k, 1) -1;
Y(k+1:end,k) = X(1:end-k);
end
Y_tr = Y(1:seqL_tr, :);
Y_te = Y(seqL_tr+1:seqL_tr+seqL_te, :);
%% apply the ESN model
resS = 20;
reg = 0.00000;
woSteps = 100;
dimIn = 1;
dimOut = K;
dtCons = 1; % no leaky rate, as in Jaeger's paper
specRad = 0.90;
univRes = 0;
linESN = 0;
psdRes = 1;
useRelu = 0;
lspecs = {'class','ESN'; 'hidDim',resS; 'reg',reg; 'inpScale', 1; ...
'woSteps',woSteps; 'inpDim',dimIn; 'outDim',dimOut; 'psdRes', psdRes; ...
'dt', dtCons; 'specRad', specRad; 'universRes', univRes; ...
'linearESN', linESN; 'useReLU',useRelu; 'use_init_h', 0}; %; ...
%'universResPars', [v,rc,rj,jumps]; };
esn = ESN(dimIn, dimOut, lspecs);
esn.init();
esn.train(X_tr, Y_tr);
preds_te = esn.apply(X_te);
preds_te = preds_te(woSteps+1:end,:);
Y_teEval = Y_te(woSteps+1:end,:);
preds_tr = esn.apply(X_tr);
preds_tr = preds_tr(woSteps+1:end,:);
Y_trEval = Y_tr(woSteps+1:end,:);
corrSq_tr = zeros(K,1);
for k=1:K
corrSq_tr(k) = ...
mean((preds_tr(:,k) - mean(preds_tr(:,k))) .* (Y_trEval(:,k) - mean(Y_trEval(:,k))))/ ...
(std(preds_tr(:,k)) * std(Y_trEval(:,k)));
corrSq_tr(k) = corrSq_tr(k)^2;
end
corrSq = zeros(K,1);
for k=1:K
corrSq(k) = ...
mean((preds_te(:,k) - mean(preds_te(:,k))) .* (Y_teEval(:,k) - mean(Y_teEval(:,k))))/ ...
(std(preds_te(:,k)) * std(Y_teEval(:,k)));
corrSq(k) = corrSq(k)^2;
end
MC = sum(corrSq);
figure
%myFigure;
subplot(1,2,1);
plot(corrSq_tr);
title(['training, MC: ' num2str(sum(corrSq_tr))]); axis([0 K 0 1]);
subplot(1,2,2); plot(corrSq);
title(['testing, MC: ' num2str(MC)]); axis([0 K 0 1]);
% % plot example predictions
% k = 7;
% myFigure; hold on;
% plot(1:size(Y_teEval,1), Y_teEval(:,k), 'b');
% plot(1:size(preds_te,1), preds_te(:,k), 'g');
%
% myFigure; hold on;
% plot(1:size(Y_trEval,1), Y_trEval(:,k), 'b');
% plot(1:size(preds_tr,1), preds_tr(:,k), 'g');
function [acc,classwiseAcc] = eval_contin_fun_pred(targets, preds, woSteps, showResults, colors, col_dofs, model)
% This function receives a target signal and a prediction of a model.
% It expects the targets to be scaled from -1 to 1, with a three-class
% encoding consisting of the classes -1, 0 and +1. The function removes all
% samples whose target values lie between these class levels before scoring.
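% A minimal usage sketch (hypothetical data; colors and col_dofs are only
% needed when showResults > 1):
%   [acc, classwiseAcc] = eval_contin_fun_pred(Y_te, preds_te, 100, 1);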
if nargin < 4
showResults = 0;
end
if nargin < 7
model = '';
end
if ~iscell(targets)
targets = {targets};
preds = {preds};
end
% compute an accuracy for each class in each DoF
dofs = size(targets{1},2);
% there are 3 classes per degree of freedom: -1,0,1
classes = [-1 0 1];
numSeq = numel(targets);
accs = zeros(numSeq, dofs);
classwiseAccs = zeros(numel(classes), dofs, numSeq);
for seq = 1:numSeq
% remove the first woSteps instances
predsS = preds{seq}(woSteps:end,:);
targetsS = targets{seq}(woSteps:end,:);
% remove the intermediate segments
addr_vec = (abs(targetsS) <= 0.001) + (abs(targetsS) >= 0.99);
addr_vec = logical(min(addr_vec, [], 2));
preds_cut = round(predsS(addr_vec, :));
targets_cut = round(targetsS(addr_vec, :));
% compute the overall accuracy
accs(seq,:) = sum(preds_cut == targets_cut)/size(preds_cut,1);
for dof = 1:dofs
for cl = 1:numel(classes)
curr_class_idx = classes(cl) == targets_cut(:, dof);
curr_pred = preds_cut(curr_class_idx, dof);
% compute the accuracy of class cl in degree of freedom dof
classwiseAccs(cl, dof, seq) = sum(curr_pred == classes(cl))/sum(curr_class_idx);
end
end
end
acc = sum(accs,1)/numSeq;
classwiseAcc = sum(classwiseAccs,3)/numSeq;
% print the accuracy?
if showResults > 0
if dofs == 1
disp(['=========' model '=Performance=========']);
fprintf(' DoF1 \n');
fprintf(' overall acc: %6.2f%% \n', 100*acc(1));
disp(' -------------------------------');
fprintf(' cl ''-1'' acc: %6.2f%% \n', 100*classwiseAcc(1,1));
fprintf(' cl ''0'' acc: %6.2f%% \n', 100*classwiseAcc(2,1));
fprintf(' cl ''+1'' acc: %6.2f%% \n', 100*classwiseAcc(3,1));
disp('=================================');
elseif dofs == 2
disp(['=========' model '=Performance=========']);
fprintf(' DoF1 DoF2\n');
fprintf(' overall acc: %6.2f%% %6.2f%%\n', 100*acc(1), 100*acc(2));
disp(' -------------------------------');
fprintf(' cl ''-1'' acc: %6.2f%% %6.2f%%\n', 100*classwiseAcc(1,1), 100*classwiseAcc(1,2));
fprintf(' cl ''0'' acc: %6.2f%% %6.2f%%\n', 100*classwiseAcc(2,1), 100*classwiseAcc(2,2));
fprintf(' cl ''+1'' acc: %6.2f%% %6.2f%%\n', 100*classwiseAcc(3,1), 100*classwiseAcc(3,2));
disp('=================================');
elseif dofs == 3
fprintf('The accuracy amounts to %5.2f%% %5.2f%% %5.2f%% \n', ...
100*acc(1), 100*acc(2), 100*acc(3));
% TODO: add the detailed accuracies
end
end
% do visualizations?
if showResults > 1
myFigure; hold on;
nSamples = size(targets_cut,1);
hTar = zeros(dofs,1);
hPred = zeros(dofs,1);
if showResults == 2
prettyShift = [0.005 -0.005];
elseif showResults == 3
prettyShift = [0 0];
end
% plot the target curve and the predictions
for dof=1:dofs
if showResults == 3
subplot(dofs,1,dof); hold on;
end
hTar(dof) = plot(1:nSamples, targets_cut(:, dof)+prettyShift(dof), '-', 'Color', squeeze(colors(col_dofs(dof), 1, :)));
hPred(dof) = plot(1:nSamples, preds_cut(:, dof), 'o', 'Color', squeeze(colors(col_dofs(dof), 3, :)));
currAx = axis;
axis([0 currAx(2) -1.1 1.1])
end
% add a legend and the accuracy
if showResults == 2
% use a brief accuracy description
legend([hTar; hPred], {'DoF1 gt', 'DoF2 gt', 'DoF1 pred', 'DoF2 pred'});
title(sprintf('%s Accuracy: DoF1: %5.2f%%, DoF2: %5.2f%%', model, 100*acc(1), 100*acc(2)));
elseif showResults == 3
% provide a detailed accuracy description
for dof=1:dofs
subplot(dofs,1,dof);
currDof = sprintf('DoF%i', dof);
legend([hTar(dof); hPred(dof)], {[currDof ' gt'], [currDof ' pred']});
title(sprintf('%s accs in DoF%i: %2.0f%%, %2.0f%%, %2.0f%%', ...
model, dof, 100*classwiseAcc(1,dof), ...
100*classwiseAcc(2,dof), 100*classwiseAcc(3,dof)));
end
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Copyright (c) 2013 R. F. Reinhart, CoR-Lab %%%
%%% Bielefeld University, Germany, http://cor-lab.de %%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Example for learning in the model space
%The goal is to differentiate between two noisy sine waves
length = 10000;
sample_length = 100;
noise_std = 0.1;
n_samples = 2*length/sample_length
N = 30;
rng(42);
t=[0:length]';
sin1=sin(0.2*t)+noise_std*randn(length+1,1);
sin2=sin(0.311*t)+noise_std*randn(length+1,1);
X_mat = zeros(n_samples,sample_length);
X = cell(n_samples,1);
Y = cell(n_samples,1);
ind = 1;
for i=1:sample_length:length
X{ind} = sin1(i:i+sample_length-1);
X_mat(ind,:) = X{ind};
Y{ind} = sin1(i+1:i+sample_length);
ind = ind + 1;
X{ind} = sin2(i:i+sample_length-1);
X_mat(ind,:) = X{ind};
Y{ind} = sin2(i+1:i+sample_length);
ind = ind + 1;
end
sine1_ind = 1:2:n_samples;
sine2_ind = 2:2:n_samples;
% figure
% plot(X{1})
% figure;
% plot(Y{1})
lspecs = {'class','ESN'; 'hidDim',N; 'reg',1; 'woSteps',5; 'inpDim',1; 'outDim',1};
%% create learner
rng(42);
learner = ESN(1, 1, lspecs);
%cmd = ['learner = ' lspecs{1,2} '(inpDim, outDim, lspecs);'];
%eval(cmd);
learner.init(X(1:2));
%% create models
models = zeros(n_samples,N);
for i=1:n_samples
learner.h = zeros(N,1);
learner.train(X(i), Y(i));
models(i,:) = reshape(learner.wOut, 1, numel(learner.wOut));
end
figure('position', [300, 150, 1000, 800])
subplot(2,2,1:2)
plot(sin1(1:500), 'b');
hold on;
plot(sin2(1:500), 'g');
title('Signals');
legend('sin(0.2)', 'sin(0.311)');
subplot(2,2,3)
[eigvalues eigvectors repr mX]= pca(X_mat, 2);
hold on;
plot(repr(sine1_ind,1), repr(sine1_ind,2), 'bo')
plot(repr(sine2_ind,1), repr(sine2_ind,2), 'go')
legend('sin(0.2)', 'sin(0.311)');
title('Signal Space')
xlabel('pc1'); ylabel('pc2');
subplot(2,2,4)
[eigvalues eigvectors repr mX]= pca(models, 2);
plot(repr(sine1_ind,1), repr(sine1_ind,2), 'bo')
hold on;
plot(repr(sine2_ind,1), repr(sine2_ind,2), 'go')
legend('sin(0.2)', 'sin(0.311)');
title('Model Space')
xlabel('pc1'); ylabel('pc2');
hold off;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Copyright (c) 2013 R. F. Reinhart, CoR-Lab %%%
%%% Bielefeld University, Germany, http://cor-lab.de %%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [y] = norm2range(x, ra, offset, minmax)
%NORM2RANGE y = norm2range(x, ra, offset, minmax)
% ra is a 2 x dim matrix holding the min and max values per dimension
% minmax is repmat([-1; 1], 1, dim) by default
if nargin < 3
offset = zeros(1, size(x,2));
end
if nargin < 4
minmax = repmat([-1 1]', 1, size(x,2));
end
%cut exceeding dimensions
ra = ra(:,1:min(size(ra,2), size(x,2)));
x = x(:,1:min(size(ra,2), size(x,2)));
tmp = minmax(2,:) - minmax(1,:);
tmp(tmp==0) = 1;
scale = 1./(tmp);
range = (ra(2,:) - ra(1,:));
%y = zeros(size(x));
%for i=1:size(x,1)
% y(i,:) = (scale .* (x(i,:) - minmax(1,:))) .* range + ra(1,:);
% y(i,:) = y(i,:) + offset;
%end
y = repmat(scale .* range, size(x,1), 1) .* (x - repmat(minmax(1,:), size(x,1), 1)) + repmat(ra(1,:) + offset, size(x,1), 1);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Copyright (c) 2013 R. F. Reinhart, CoR-Lab %%%
%%% Bielefeld University, Germany, http://cor-lab.de %%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [data offset range] = normalize(data)
%NORMALIZE Remove the per-column mean from data and scale each column to
%   [-1, 1]; also returns the offset (mean) and the per-column [min; max]
%   range of the mean-centred data.
offset = mean(data, 1);
%data = data - repmat(offset, size(data,1), 1);
data = bsxfun(@minus, data, offset);
range = [min(data, [], 1); max(data, [], 1)];
data = range2norm(data, range);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Copyright (c) 2013 R. F. Reinhart, CoR-Lab %%%
%%% Bielefeld University, Germany, http://cor-lab.de %%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [eigvalues eigvectors coeffs mX] = pca(X, dim)
%PCA Principal component analysis via the eigendecomposition of X'*X.
% [eigvalues eigvectors coeffs mX] = pca(X, dim)
% Returns the eigenvalues and eigenvectors in descending order, the
% projection of the mean-centred data onto the first dim components,
% and the data mean mX.
numSamples = size(X,1);
%remove mean
mX = mean(X,1);
X = X - repmat(mX, numSamples, 1);
%calc covariance matrix
C = X'*X;
%calc eigenvectors and values
[v, e] = eig(C);
%sort eigenvalues and vectors
% [eigvalues, indices] = sort(diag(e), 'descend');
% eigvectors = v(:,indices);
eigvalues = flipdim(diag(e),1); % eig returns everything in ascending order
eigvectors = flipdim(v,2);
%conduct transformation into pca-space using dim components
coeffs = X * eigvectors(:,1:dim);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Copyright (c) 2013 R. F. Reinhart, CoR-Lab %%%
%%% Bielefeld University, Germany, http://cor-lab.de %%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [ R ] = randu(rows, cols, range)
%RANDU Uniform random rows x cols matrix with entries in [-range, range];
%   range defaults to 1.
if nargin < 3
range = 1;
end
R = range * 2.0 * (rand(rows, cols) - 0.5);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Copyright (c) 2013 R. F. Reinhart, CoR-Lab %%%
%%% Bielefeld University, Germany, http://cor-lab.de %%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [y] = range2norm(x, ra, offset, minmax)
%RANGE2NORM y = range2norm(x, ra, offset, minmax)
% ra is a 2 x dim matrix holding the min and max values per dimension
% minmax is repmat([-1; 1], 1, dim) by default
if nargin < 3
offset = zeros(1, size(x,2));
end
if nargin < 4
minmax = repmat([-1 1]', 1, size(x,2));
end
%cut exceeding dimensions
ra = ra(:,1:min(size(ra,2), size(x,2)));
x = x(:,1:min(size(ra,2), size(x,2)));
tmp = ra(2,:) - ra(1,:);
tmp(tmp==0) = 1;
scale = 1 ./ (tmp);
%y = zeros(size(x));
%for i=1:size(x,1)
%y(i,:) = x(i,:) - offset;
%%y(i,:) = 2 * (((y(i,:) - ra(1,:)) .* scale) - 0.5);
%y(i,:) = ((minmax(2,:) - minmax(1,:)) .* ((y(i,:) - ra(1,:)) .* scale)) + minmax(1,:);
%end
y = repmat(minmax(2,:) - minmax(1,:), size(x,1), 1) .* (x - repmat(offset, size(x,1), 1) - repmat(ra(1,:), size(x,1), 1)) .* repmat(scale, size(x,1), 1) + repmat(minmax(1,:), size(x,1), 1);
%y = bsxfun(@plus, bsxfun(@times, bsxfun(@times, minmax(2,:) - minmax(1,:), bsxfun(@minus, x, offset-ra(1,:))), scale), minmax(1,:));
%% Some initial settings
clear all; close all; clc;
%% generate the data
nSamples = 1000;
nChan = 8;
nOuts = 3;
X = zeros(nSamples , nChan);
Y = zeros(nSamples , nOuts);
for n = 1:nOuts
freq = 0.02*pi;
Y(:, n) = sin(freq * (1:nSamples) + (2*n)/(3*freq));
Y(Y(:,n) < 0, n) = 0;
end
for n = 1:nChan
freq = 0.5 + randn*0.01;
if n <=3
ampl = Y(:,1) + 1;
elseif n <=6
ampl = Y(:,2) + 1;
else
ampl = Y(:,3) + 1;
end
X(:,n) = ampl.*sin(freq * (1:nSamples)') + randn(nSamples, 1) * 0.1;
end
figure; hold on;
plot(1:nSamples, X(:,1), 'b');
plot(1:nSamples, Y(:,1), 'g');
hold off;
%% initialize an ESN
woSteps = 20;
N = 300;
lspecs = {'class','ESN'; 'hidDim',N; 'reg',1e-7; 'woSteps',woSteps; 'inpDim',8; 'outDim',3; 'dt', 0.25; 'specRad', 0.9; 'universRes', 0; 'linearESN', 0; 'useReLU', 0; 'psdRes', 0};
esn = ESN(8, 3, lspecs);
esn.init();
esn.train(X, Y);
%% apply the ESN
esn.reset();
Y_esn = esn.apply(X);
%% Compare with linear regression
W = (X' * X + 1E-16 * eye(nChan)) \ X' * Y;
Y_lr = X * W;
%% plot the results
addpath ../plotting/;
colors = tango_color_scheme();
figure;
hold on;
for dof=1:3
plot(1:nSamples, Y(:, dof), '-', 'Color', squeeze(colors(dof, 1, :)));
plot(1:nSamples, Y_esn(:, dof), '-', 'Color', squeeze(colors(dof, 3, :)));
plot(1:nSamples, Y_lr(:, dof), '--', 'Color', squeeze(colors(dof, 3, :)));
end
hold off;
%% export results to JSON
addpath ../jsonlab/;
savejson('raw', X, 'raw.json');
esn.export('reservoir.json', 2);
esn.export('out.json', 1);
from pandas import Series
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.stattools import acf
from statsmodels.tsa.stattools import pacf
import pdb
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
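# A quick illustration of difference(): difference([1, 3, 6, 10]) returns
# Series([2, 3, 4]), i.e. the first-order differenced series.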
def find_lag(singe_dim_acf_val, corresponding_conf_val):
    """Return the first lag at which the (partial) autocorrelation value
    falls inside its confidence interval, i.e. is no longer significant."""
    t = 0
    is_in_interv = False
    while is_in_interv is False and t < len(singe_dim_acf_val):
        upper_margin = corresponding_conf_val[t, 1] - singe_dim_acf_val[t]
        lower_margin = corresponding_conf_val[t, 0] - singe_dim_acf_val[t]
        if singe_dim_acf_val[t] < upper_margin:
            is_in_interv = True
        if singe_dim_acf_val[t] < 0 and singe_dim_acf_val[t] > lower_margin:
            is_in_interv = True
        t = t + 1
    return t - 1
lag_values = []
trial = 1
#for trial in range(0,len(tr)):
traj = tr[trial]
traj_lags = []
series = []
for dim in traj.keys():
h = []
series = traj[dim]
series = difference(series)
ac_val, conf_ac_val = acf(series,nlags=40, alpha=0.05)
pac_val, conf_pac_val = pacf(series,nlags=40, alpha=0.05)
ac_lags = find_lag(ac_val,conf_ac_val)
pac_lags = find_lag(pac_val,conf_pac_val)
h.append(ac_lags)
h.append(pac_lags)
traj_lags.append(h)
print('Done with dim:'+ dim)
print('Done with trial:'+ str(trial))
lag_values.append(traj_lags)
plot_acf(series)
pyplot.show(block=False)
pdb.set_trace()
plot_pacf(series)
pyplot.show(block=False)
'''
pyplot.figure()
pyplot.plot(ac_val,'*')
pyplot.figure()
pyplot.plot(conf_ac_val, '*')
pyplot.figure()
pyplot.plot(conf_ac_val[:, 0] - ac_val, '*')
pyplot.figure()
pyplot.plot(confs[:, 1] - ac_val, '*')
plot_acf(series)
pyplot.show(block=False)
'''