% DeepReLU training demo
% ----------------------
% Trains a 4-layer ReLU network (via the external DeepReLU function) for
% max_epoch epochs on five 5x5 binary digit images, then feeds the same
% training data through the trained network and prints the outputs next
% to the one-hot targets so the quality of the fit can be inspected.
%
% Requires on the path: DeepReLU, ReLU, Softmax
% (and compareImages for the commented-out visualization at the bottom).

clearvars   % NOTE: 'clear all' is discouraged (also purges functions/breakpoints)
close all
clc

% Input set: five 5x5 pixel images of the digits 1-5.
% 0 = white pixel; 1 = black pixel.
X = zeros(5, 5, 5);
X(:, :, 1) = [0 1 1 0 0;
              0 0 1 0 0;
              0 0 1 0 0;
              0 0 1 0 0;
              0 1 1 1 0];   % 1
X(:, :, 2) = [1 1 1 1 0;
              0 0 0 0 1;
              0 1 1 1 0;
              1 0 0 0 0;
              1 1 1 1 1];   % 2
X(:, :, 3) = [1 1 1 1 0;
              0 0 0 0 1;
              0 1 1 1 0;
              0 0 0 0 1;
              1 1 1 1 0];   % 3
X(:, :, 4) = [0 0 0 1 0;
              0 0 1 1 0;
              0 1 0 1 0;
              1 1 1 1 1;
              0 0 0 1 0];   % 4
X(:, :, 5) = [1 1 1 1 1;
              1 0 0 0 0;
              1 1 1 1 0;
              0 0 0 0 1;
              1 1 1 1 0];   % 5

% Desired outputs, one-hot (1-of-N) encoded: row k is the target for image k.
D = [1 0 0 0 0;   % 1
     0 1 0 0 0;   % 2
     0 0 1 0 0;   % 3
     0 0 0 1 0;   % 4
     0 0 0 0 1];  % 5

% Weight initialization: uniform in [-1, 1].
% Layer sizes: 25 -> 20 -> 20 -> 20 -> 5.
W1 = 2*rand(20, 25) - 1;
W2 = 2*rand(20, 20) - 1;
W3 = 2*rand(20, 20) - 1;
W4 = 2*rand( 5, 20) - 1;

% Training: each call to DeepReLU performs one pass (epoch) over the data set
% and returns the updated weights.
max_epoch = 10000;
for epoch = 1:max_epoch
    [W1, W2, W3, W4] = DeepReLU(W1, W2, W3, W4, X, D);
end

% Inference: forward-propagate every training image through the trained net.
N = size(X, 3);
y = zeros(N, size(D, 2));
for k = 1:N
    x  = reshape(X(:, :, k), 25, 1);  % flatten 5x5 image to a 25x1 column
    y1 = ReLU(W1*x);                  % hidden layer 1
    y2 = ReLU(W2*y1);                 % hidden layer 2
    y3 = ReLU(W3*y2);                 % hidden layer 3
    y(k, :) = Softmax(W4*y3);         % output layer: class scores for image k
end

disp('Results:');
disp(' [desired]:');
disp(D);
disp(' [network_output]:');
disp(y)

%% Showing images:
% for i = 1:N
%     compareImages(X(:,:,i), y(i,:));
% end