• Main Page
  • Modules
  • Classes
  • Files
  • File List

D:/Projekt/ECF_trunk/examples/GPNN/EvalOp.cpp

00001 #include <cmath>
00002 #include <ecf/ECF.h>
00003 #include "EvalOp.h"
00004 
00005 
00006 
// Currently a single global network object (a simplification).
// NOTE(review): shared mutable global — presumably not safe if individuals
// are evaluated in parallel; confirm the framework evaluates sequentially.
Network net;
00009 
00010 
00011 // construct a network
00012 Network::Network()
00013 {
00014     // resize network - currently hard coded
00015     netSize = 5;
00016 
00017     neurons.resize(netSize);
00018     for(uint i = 0; i < netSize; i++)
00019         neurons[i].resize(netSize);
00020 }
00021 
00022 
00023 // initialize the network
00024 void Network::initialize(StateP state)
00025 {
00026     // for each neuron
00027     for(uint i = 0; i < netSize; i++)
00028         for(uint j = 0; j < netSize; j++) {
00029 
00030             // set potential, fire prob. and state (random)
00031             neurons[i][j].potential = state->getRandomizer()->getRandomDouble();
00032             neurons[i][j].fireProbability = state->getRandomizer()->getRandomDouble();
00033             neurons[i][j].state = state->getRandomizer()->getRandomDouble();
00034             neurons[i][j].previousState = state->getRandomizer()->getRandomDouble();
00035 
00036             // set initial weights (random in [0,1])
00037             for(uint k = 0; k < 3; k++)
00038                 for(uint l = 0; l < 3; l++)
00039                     neurons[i][j].weight[k][l] = state->getRandomizer()->getRandomDouble();
00040         }
00041 }
00042 
00043 
00044 double Network::learnNumberOfIterations()
00045 {
00046     minError = calculateError();
00047     newError = minError, oldError = minError;
00048     double value, newWeight, reward;
00049 
00050     // run network weights 'optimization' with current individual
00051     for(uint iter = 0; iter < nEvaluations; iter++) {
00052 
00053         // set the 'reward' variable
00054         if(newError < oldError)
00055             reward = 1;
00056         else
00057             reward = -1;    // for example...
00058         //reward = oldError - newError; // an alternative
00059         learningTree->setTerminalValue("reward", &reward);
00060 
00061         // iterate over all the neurons
00062         for(uint i = 0; i < netSize; i++)
00063             for(uint j = 0; j < netSize; j++) {
00064 
00065                 // update terminals relevant to the current neuron:
00066                 learningTree->setTerminalValue("pot", &neurons[i][j].potential);
00067                 learningTree->setTerminalValue("fire", &neurons[i][j].fireProbability);
00068                 learningTree->setTerminalValue("state", &neurons[i][j].state);
00069                 learningTree->setTerminalValue("pstate", &neurons[i][j].previousState);
00070 
00071                 // iterate over neuron weights - update values as the tree result
00072                 for(uint k = 0; k < 3; k++)
00073                     for(uint l = 0; l < 3; l++) {
00074                         // set current weight
00075                         value = neurons[i][j].weight[k][l];
00076                         learningTree->setTerminalValue("old_w", &value);
00077 
00078                         // get the value of the current tree
00079                         learningTree->execute(&newWeight);
00080 
00081                         // check limits (?)
00082                         if(newWeight < -10)
00083                             newWeight = -10;
00084                         else if(newWeight > 10)
00085                             newWeight = 10;
00086 
00087                         // set the new weight
00088                         neurons[i][j].weight[k][l] = newWeight;
00089                 }
00090             }
00091 
00092         // update error (simulate network)
00093         oldError = newError;
00094         newError = calculateError();
00095     }
00096 
00097     return newError;
00098 }
00099 
00100 
00101 // calculate 'error' based on current weights' values
00102 inline double Network::calculateError()
00103 {
00104     // sample optimization function (google: schaffer function f6)
00105     double d = 0;
00106     uint offset, weightNo = 0;
00107     for(uint i = 0; i < netSize; i++)
00108         for(uint j = 0; j < netSize; j++)
00109             for(uint k = 0; k < 3; k++)
00110                 for(uint l = 0; l < 3; l++) {
00111                     offset = i % 5; // makes the problem MUCH harder!
00112                     d += pow(neurons[i][j].weight[k][l] - offset, 2);
00113                 }
00114     return 0.5 - (pow(sin(sqrt(d)), 2) - 0.5) / pow(1 + 0.001 * d, 2);
00115 }
00116 
00117 
00118 
00119 void EvalOp::registerParameters(StateP state)
00120 {
00121     // register default number of algorithm evaluations
00122     state->getRegistry()->registerEntry("nEvaluations", (voidP) (new uint(200)), ECF::UINT);
00123 }
00124 
00125 
00126 // called once before the evolution
00127 bool EvalOp::initialize(StateP state)
00128 {
00129     // network evaluations per single GP individual
00130     voidP sptr = state->getRegistry()->getEntry("nEvaluations"); // get parameter value
00131     net.nEvaluations = *((uint*) sptr.get()); // convert from voidP to user defined type
00132 
00133     state_ = state;
00134 
00135     return true;
00136 }
00137 
00138 
00139 FitnessP EvalOp::evaluate(IndividualP individual)
00140 {
00141     FitnessP fitness (new FitnessMin);
00142     
00143     // get the genotype 
00144     Tree::Tree* tree = (Tree::Tree*) individual->getGenotype().get();
00145 
00146     // set the net
00147     net.initialize(state_);
00148     net.learningTree = tree;
00149 
00150     // set pointer to the net so Tree functions can access it
00151     state_->getContext()->environment = &net;
00152 
00153     // learn the net
00154     double error = net.learnNumberOfIterations();
00155 
00156     // set final error as individual's fitness
00157     fitness->setValue(error);
00158     
00159     return fitness;
00160 }

Generated on Fri Jul 5 2013 09:34:24 for ECF by  doxygen 1.7.1