BackPropagation in Pascal

Compiles under Free Pascal 2.0.0.
It implements the 8x3x8 example from Machine Learning, the same one I once wrote on my TI calculator.
Computers really are fast: 10000 training passes go by before you can blink~
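(Background for anyone without the book at hand: the 8x3x8 network in Mitchell's Machine Learning learns the identity function. Every training input is one of the eight one-hot strings and the target is the input itself, so the three hidden units are forced to invent a compact 3-bit code for the eight patterns. A minimal training set for the program below would therefore be the pairs:

Input = Target: 10000000, 01000000, 00100000, 00010000, 00001000, 00000100, 00000010, 00000001)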
uses sysutils;
const
    nin=8;
    nhid=3;
    nout=8;
    rate=0.1;

var
    t,p,q,r,v,tt,tc:longint;
    l1:array[1..nin,1..nhid]of real;
    l2:array[1..nhid,1..nout]of real;
    i:array[1..nin]of longint;
    eo,o,ot:array[1..nout]of real;
    h,eh:array[1..nhid]of real;
    s1,s2:string;
    te:real;

function sigmoid(x:real):real;
begin exit(1/(1+exp(-x)));end;
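//A useful identity: sigmoid'(x) = sigmoid(x)*(1-sigmoid(x)). The training
//loop below relies on it through the o*(1-o) and h*(1-h) factors.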

begin
    if fileexists('bpnet.dat') then
    begin
        writeln('Reading Network Data:');
        assign(input,'bpnet.dat');
        reset(input);
        for t:=1 to nin do
        for p:=1 to nhid do
        read(l1[t,p]);
        for t:=1 to nhid do
        for p:=1 to nout do
        read(l2[t,p]);
        close(input);
    end else
    begin
        writeln('Data not found, initializing...');
        randomize; //seed the PRNG so each run starts from different weights
        for t:=1 to nin do
        for p:=1 to nhid do
            l1[t,p]:=random(100)/10000;
        for t:=1 to nhid do
        for p:=1 to nout do
            l2[t,p]:=random(100)/10000;
    end;
    assign(input,'');reset(input); //reattach standard input to the console
    //Init weights
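    //The initial weights are tiny (at most 0.0099), so every sigmoid starts
    //near its midpoint 0.5, where the gradient is largest.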

    write('Train times:');readln(tt);
    for tc:=1 to tt do
    begin
    write('Input  :');readln(s1);
    write('Target :');readln(s2);
    for t:=1 to nin do i[t]:=ord(s1[t])-48;
    for t:=1 to nout do ot[t]:=ord(s2[t])-48;
    //Input train data
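    //s1 and s2 are strings of '0'/'1' characters; ord(c)-48 turns each digit
    //character into the number 0 or 1 (48 is ord('0')).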

    for t:=1 to nhid do
    begin
        h[t]:=0; //reset the accumulator: globals are zeroed only once, at startup
        for p:=1 to nin do
            h[t]:=h[t]+i[p]*l1[p,t];
        h[t]:=sigmoid(h[t]);
    end;
    for t:=1 to nout do
    begin
        o[t]:=0;
        for p:=1 to nhid do
            o[t]:=o[t]+h[p]*l2[p,t];
        o[t]:=sigmoid(o[t]);
    end;
    //feedforward
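    //In symbols: h[t] = sigmoid( sum over p of i[p]*l1[p,t] ),
    //            o[t] = sigmoid( sum over p of h[p]*l2[p,t] ).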

    te:=0;
    for t:=1 to nout do
    begin
        eo[t]:=o[t]*(1-o[t])*(ot[t]-o[t]);
        te:=te+abs(o[t]-ot[t]);
    end;
    for t:=1 to nhid do
    begin
        eh[t]:=0; //likewise reset before accumulating
        for p:=1 to nout do
            eh[t]:=eh[t]+eo[p]*l2[t,p];
        eh[t]:=h[t]*(1-h[t])*eh[t];
    end;
    //calculate error
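    //These are the textbook backprop delta rules:
    //  eo[t] = o[t]*(1-o[t])*(ot[t]-o[t])                    (output units)
    //  eh[t] = h[t]*(1-h[t]) * sum over p of eo[p]*l2[t,p]   (hidden units)
    //The o*(1-o) and h*(1-h) factors are the sigmoid derivative noted above.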

    for t:=1 to nhid do
        for p:=1 to nout do
        l2[t,p]:=l2[t,p]+rate*eo[p]*h[t];
    for t:=1 to nin do
        for p:=1 to nhid do
        l1[t,p]:=l1[t,p]+rate*eh[p]*i[t];
    //change weights
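    //Each weight moves by rate*delta*activation, i.e. one gradient-descent
    //step on the squared output error (rate=0.1 here).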

    for t:=1 to nout do write(o[t]:0:2,' ');writeln;
    writeln('Error in all:',te:0:3);
    //output statistics
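    //te is the sum of |o[t]-ot[t]| over the outputs: an L1-style measure,
    //convenient for watching convergence from one pass to the next.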
    end;

    writeln('Writing network data');
    assign(output,'bpnet.dat');rewrite(output);
    for t:=1 to nin do
    begin
        for p:=1 to nhid do write(l1[t,p],' ');
        writeln;
    end;
    for t:=1 to nhid do
    begin
        for p:=1 to nout do write(l2[t,p],' ');
        writeln;
    end;
    close(output);
    //write network data
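    //The weights go out as space-separated reals, exactly the layout the
    //read() loops at the top expect on the next run.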

    assign(output,'');rewrite(output); //reattach standard output to the console
    writeln('Finally.');readln;
end.
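
A training session for the 8x3x8 identity task would start something like this (the numbers the network prints depend on the random initial weights, so they are omitted here):

Train times:24
Input  :10000000
Target :10000000
Input  :01000000
Target :01000000

...and so on, cycling through the eight patterns; the 'Error in all' figure should shrink pass by pass.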

One Comment

  1.
/************************************************************************
 * BackPropagation Neural Network Example v 0.1 - 2005-12-15
 * By Hearson(张闻宇)
 ************************************************************************/
#define DEBUG 1
#define BUILD_DATE 20051225

#include <stdio.h>
#include <math.h>
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
#include <time.h>

#define WEIGHT_DATA_FILE "bpnn.dat"
#define INPUT_LAYER 2
#define HIDDEN_LAYER 2
#define OUTPUT_LAYER 1
// Learning Rate
#define LR 0.1

// Index INPUT_LAYER/HIDDEN_LAYER holds the bias unit (fixed at -1).
struct s_bpnn_weight {
    float hi[HIDDEN_LAYER][INPUT_LAYER + 1];
    float oh[OUTPUT_LAYER][HIDDEN_LAYER + 1];
};

struct s_bpnn_unit {
    float i[INPUT_LAYER + 1];
    float h[HIDDEN_LAYER + 1];
    float o[OUTPUT_LAYER];
};

struct train_example {
    float i[INPUT_LAYER];
    float t[OUTPUT_LAYER];
};

struct s_bpnn_weight bw;
struct s_bpnn_unit bu;

void welcome(void)
{
    printf(" * BackPropagation Neural Network Example - Build %d * By Hearson\n\n", BUILD_DATE);
}

void help(void)
{
    printf("Usage:\n");
    printf("bpnn.exe [-r|-t|-e]\n");
    printf(" * -r: Reset all the weights in this neural network with randomly generated numbers\n");
    printf(" * -t: Train this neural network with offered data\n");
    printf(" * -e: Test this neural network\n\n");
    printf("A neural network must be reset before use.\n\n");
}

int initialize_bpnn(void)
{
    FILE* wdf;
    int li, lh, lo, i, j;
    printf("Loading neural network data...");
    if (NULL == (wdf = fopen(WEIGHT_DATA_FILE, "r"))) {
        printf("Unable to open data file!\n\n");
        return 1;
    }
    // Check whether the data file matches this neural network
    fscanf(wdf, "%d %d %d", &li, &lh, &lo);
    if ((li != INPUT_LAYER) || (lh != HIDDEN_LAYER) || (lo != OUTPUT_LAYER)) {
        fclose(wdf);
        printf("The data file does not match this neural network!\n\n");
        return 1;
    }
    // Read neural network data
    for (i = 0; i < HIDDEN_LAYER; i++)
        for (j = 0; j <= INPUT_LAYER; j++)
            fscanf(wdf, "%f", &bw.hi[i][j]);
    for (i = 0; i < OUTPUT_LAYER; i++)
        for (j = 0; j <= HIDDEN_LAYER; j++)
            fscanf(wdf, "%f", &bw.oh[i][j]);
    fclose(wdf);
    bu.i[INPUT_LAYER] = -1;   // bias inputs
    bu.h[HIDDEN_LAYER] = -1;
    printf("OK!\n\n");
    return 0;
}

void reset_bpnn(void)
{
    int i, j;
    printf("Resetting Neural Network...");
    srand((unsigned)time(NULL));   // seed from the clock
    // Reset weights for the Input-Hidden Layer
    for (i = 0; i < HIDDEN_LAYER; i++)
        for (j = 0; j <= INPUT_LAYER; j++)
            bw.hi[i][j] = (rand() - RAND_MAX / 2) / ((float)RAND_MAX * 100);
    // Reset weights for the Hidden-Output Layer
    for (i = 0; i < OUTPUT_LAYER; i++)
        for (j = 0; j <= HIDDEN_LAYER; j++)
            bw.oh[i][j] = (rand() - RAND_MAX / 2) / ((float)RAND_MAX * 100);
    printf("All weights are reset with randomly generated numbers!\n");
}

void update_wdf(void)
{
    FILE* wdf;
    int i, j;
    printf("Saving neural network data...");
    if (NULL == (wdf = fopen(WEIGHT_DATA_FILE, "w"))) {
        printf("Unable to save neural network data!\n\n");
        return;
    }
    // Write units in each layer
    fprintf(wdf, "%d %d %d\n\n", INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER);
    // Input-Hidden Layer
    for (i = 0; i < HIDDEN_LAYER; i++) {
        for (j = 0; j <= INPUT_LAYER; j++)
            fprintf(wdf, "%f ", bw.hi[i][j]);
        fprintf(wdf, "\n");
    }
    fprintf(wdf, "\n");
    // Hidden-Output Layer
    for (i = 0; i < OUTPUT_LAYER; i++) {
        for (j = 0; j <= HIDDEN_LAYER; j++)
            fprintf(wdf, "%f ", bw.oh[i][j]);
        fprintf(wdf, "\n");
    }
    fclose(wdf);
    printf("OK!\n\n");
}

float sigmoid(float x)
{
    if (x <= -20) return 0;
    if (x >= 20) return 1;
    return 1 / (1 + exp(-x));
}

void calculate_output(void)
{
    int i, j;
    // Calculate the Hidden Layer
    for (i = 0; i < HIDDEN_LAYER; i++) {
        bu.h[i] = 0;
        for (j = 0; j <= INPUT_LAYER; j++)
            bu.h[i] += bu.i[j] * bw.hi[i][j];
        bu.h[i] = sigmoid(bu.h[i]);
    }
    // Calculate the Output Layer
    for (i = 0; i < OUTPUT_LAYER; i++) {
        bu.o[i] = 0;
        for (j = 0; j <= HIDDEN_LAYER; j++)
            bu.o[i] += bu.h[j] * bw.oh[i][j];
        bu.o[i] = sigmoid(bu.o[i]);
    }
}

void test_neural_network(void)
{
    int i;
    while (1) {
        printf("Data for INPUT layer:\n");
        for (i = 0; i < INPUT_LAYER; i++)
            if (EOF == scanf("%f", &bu.i[i])) return;
        printf("Neural Network returned:\n");
        calculate_output();
        for (i = 0; i < OUTPUT_LAYER; i++)
            printf("%f ", bu.o[i]);
        printf("\n\n");
    }
}

int train_neural_network(void)
{
    int train_times, example_num, i, j, k, l;
    struct train_example *example;
    float esum;
    float eo[OUTPUT_LAYER], eh[HIDDEN_LAYER + 1];
    printf("Training times: ");
    scanf("%d", &train_times);
    printf("Number of examples: ");
    scanf("%d", &example_num);
    example = malloc(example_num * sizeof(struct train_example));
    if (example == NULL) {
        printf("Error: Cannot malloc memory for examples!\n\n");
        return 1;
    }
    for (i = 0; i < example_num; i++) {
        printf("\nData for example %d:\n", i + 1);
        for (j = 0; j < INPUT_LAYER; j++)
            scanf("%f", &example[i].i[j]);
        printf("Target output for example %d:\n", i + 1);
        for (j = 0; j < OUTPUT_LAYER; j++)
            scanf("%f", &example[i].t[j]);
    }
    printf("Training neural network...");
    // Train Neural Network
    for (i = 0; i < train_times; i++) {
        for (j = 0; j < example_num; j++) {
            memcpy(bu.i, example[j].i, sizeof(example[j].i));
            calculate_output();
            // Calculate error for Output Layer
            for (k = 0; k < OUTPUT_LAYER; k++)
                eo[k] = bu.o[k] * (1 - bu.o[k]) * (example[j].t[k] - bu.o[k]);
            // Calculate error for Hidden Layer
            for (k = 0; k <= HIDDEN_LAYER; k++) {
                esum = 0;
                for (l = 0; l < OUTPUT_LAYER; l++)
                    esum += bw.oh[l][k] * eo[l];
                eh[k] = bu.h[k] * (1 - bu.h[k]) * esum;
            }
            // Update the weights for Hidden-Output Layer
            for (k = 0; k < OUTPUT_LAYER; k++)
                for (l = 0; l <= HIDDEN_LAYER; l++)
                    bw.oh[k][l] += LR * eo[k] * bu.h[l];
            // Update the weights for Input-Hidden Layer
            // (k stops before HIDDEN_LAYER: the bias slot has no incoming weights)
            for (k = 0; k < HIDDEN_LAYER; k++)
                for (l = 0; l <= INPUT_LAYER; l++)
                    bw.hi[k][l] += LR * eh[k] * bu.i[l];
        }
    }
    free(example);
    printf("Finished!\n");
    return 0;
}

int main(int argc, char *argv[])
{
    welcome();
    if (argc != 2) {
        help();
        return 0;
    }
    argv[1][1] = (char)toupper(argv[1][1]);
    if (strcmp(argv[1], "-R") == 0) {
        reset_bpnn();
        update_wdf();
        return 0;
    }
    if (initialize_bpnn()) return 1;
    if (strcmp(argv[1], "-T") == 0) {
        if (train_neural_network()) return 1;
        update_wdf();
        return 0;
    } else if (strcmp(argv[1], "-E") == 0) {
        test_neural_network();
        return 0;
    } else {
        help();
        return 1;
    }
}
