% demorbf.m
%
% An implementation of a simple one-dimensional-input Radial Basis
% Function (RBF) network.
%
% Feel free to experiment with your own test functions.
% See further comments in the program for more suggestions.

   % --- build the one-dimensional training set ---
   trainingdata.inputs  = (-1:0.13:1)';            % regularly spaced sample points
   trainingdata.schedin = trainingdata.inputs;     % scheduling inputs mirror the inputs
   N = size(trainingdata.inputs, 1);               % number of training samples

   % noisy target: a strongly nonlinear test function plus Gaussian noise
   trainingdata.outputs = sin(10*pi*(trainingdata.inputs(:,1) + ...
                          ones(N,1)).*sin(trainingdata.inputs(:,1)/2).^3) + ...
                          0.02*randn(N,1); %sin((1.5*inputs(:,1)).^3);
   % finer input grid (NOTE(review): defined but never used later in this
   % script — presumably intended for evaluating the model off the
   % training points; confirm before removing)
   inputstest = (-1:0.05:1)';



% ----- place the radial basis function centres -----
% Feel free to experiment with the spacing (or use the hand-picked set below).
spacing = 0.99*0.25;
centres = (-0.95:spacing:0.95)';
%centres =  [ -1 -0.3 0.6 0.8 1]';  % Example of arbitrarily placed bases.

% Changing `width` and `spacing` alters the conditioning of the design
% matrix basis_act. Inspect cond(basis_act) and consider what a large
% condition number implies for estimating the output weights W.
width  = 0.2;
widths = (width^2) * ones(size(centres));   % squared width, one entry per centre
M = length(centres);                        % number of basis functions
N = length(trainingdata.outputs);           % number of training samples

% ----- design matrix: Gaussian basis activations for every input -----
% entry (n,m) = exp( -(x_n - c_m)^2 / w_m )   with w_m the squared width
sqdist = (trainingdata.inputs*ones(1,M) - ones(N,1)*centres').^2;
basis_act = exp(-sqdist ./ (ones(N,1)*widths'));

W = pinv(basis_act)*trainingdata.outputs;   % least-squares estimate of output weights
y_est = basis_act*W;                        % model output on the training inputs


% ----- visualise the fit and the basis functions -----
figure(1)
clf
subplot(2,1,1)
plot(trainingdata.inputs, y_est, ':', ...
     trainingdata.inputs, trainingdata.outputs, '.')
title('Model output and training data')
subplot(2,1,2)
plot(trainingdata.inputs, basis_act)
title('Basis functions')

% each basis function scaled by its output weight, overlaid with their
% sum (the model output)
figure(2)
clf
weighted_bases = basis_act .* (ones(N,1)*W');
plot(trainingdata.inputs, weighted_bases)
hold on
plot(trainingdata.inputs, y_est, ':')
hold off
title('Basis functions weighted by weighting constants, and sum (model output)')