#include <cmath>
#include <ecf/ECF.h>
#include "EvalOp.h"


// global network instance, reused for every individual evaluation
Network net;

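/*
 * The Network and Neuron types are declared in EvalOp.h; a minimal sketch of
 * what this file assumes they look like (member names taken from the code
 * below, the actual header may differ):
 *
 *   struct Neuron {
 *       double potential, fireProbability, state, previousState;
 *       double weight[3][3];
 *   };
 *
 *   class Network {
 *   public:
 *       Network();
 *       void initialize(StateP state);
 *       double learnNumberOfIterations();
 *       double calculateError();
 *
 *       uint netSize, nEvaluations;
 *       double minError, newError, oldError;
 *       std::vector< std::vector<Neuron> > neurons;
 *       Tree::Tree* learningTree;
 *   };
 */
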
Network::Network()
{
	netSize = 5;

	// allocate a fixed netSize x netSize grid of neurons
	neurons.resize(netSize);
	for(uint i = 0; i < netSize; i++)
		neurons[i].resize(netSize);
}

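// Randomize every neuron's potential, firing probability, current and
// previous state, and its 3x3 weight kernel.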
void Network::initialize(StateP state)
{
	for(uint i = 0; i < netSize; i++)
		for(uint j = 0; j < netSize; j++) {

			neurons[i][j].potential = state->getRandomizer()->getRandomDouble();
			neurons[i][j].fireProbability = state->getRandomizer()->getRandomDouble();
			neurons[i][j].state = state->getRandomizer()->getRandomDouble();
			neurons[i][j].previousState = state->getRandomizer()->getRandomDouble();

			for(uint k = 0; k < 3; k++)
				for(uint l = 0; l < 3; l++)
					neurons[i][j].weight[k][l] = state->getRandomizer()->getRandomDouble();
		}
}

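// Run the evolved learning rule (learningTree) over every weight for
// nEvaluations iterations; the "reward" terminal tells the rule whether the
// previous iteration decreased the network error. Returns the final error.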
double Network::learnNumberOfIterations()
{
	minError = calculateError();
	newError = minError;
	oldError = minError;
	double value, newWeight, reward;

	for(uint iter = 0; iter < nEvaluations; iter++) {

		// reward the learning rule if the previous pass decreased the error
		if(newError < oldError)
			reward = 1;
		else
			reward = -1;

		learningTree->setTerminalValue("reward", &reward);

		for(uint i = 0; i < netSize; i++)
			for(uint j = 0; j < netSize; j++) {

				// expose the current neuron's variables as tree terminals
				learningTree->setTerminalValue("pot", &neurons[i][j].potential);
				learningTree->setTerminalValue("fire", &neurons[i][j].fireProbability);
				learningTree->setTerminalValue("state", &neurons[i][j].state);
				learningTree->setTerminalValue("pstate", &neurons[i][j].previousState);

				for(uint k = 0; k < 3; k++)
					for(uint l = 0; l < 3; l++) {

						value = neurons[i][j].weight[k][l];
						learningTree->setTerminalValue("old_w", &value);

						// evaluate the learning rule to obtain the new weight
						learningTree->execute(&newWeight);

						// clamp the new weight to [-10, 10]
						if(newWeight < -10)
							newWeight = -10;
						else if(newWeight > 10)
							newWeight = 10;

						neurons[i][j].weight[k][l] = newWeight;
					}
			}

		oldError = newError;
		newError = calculateError();
	}

	return newError;
}

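// Network error: the sum of squared deviations of every weight from the
// value (i % 5) of its row, mapped through a Schaffer-style expression so
// the result stays bounded.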
inline double Network::calculateError()
{
	double d = 0;
	uint offset;

	for(uint i = 0; i < netSize; i++)
		for(uint j = 0; j < netSize; j++)
			for(uint k = 0; k < 3; k++)
				for(uint l = 0; l < 3; l++) {
					offset = i % 5;
					d += pow(neurons[i][j].weight[k][l] - offset, 2);
				}

	return 0.5 - (pow(sin(sqrt(d)), 2) - 0.5) / pow(1 + 0.001 * d, 2);
}

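// Register the number of learning iterations (default 200) as a
// configurable parameter.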
void EvalOp::registerParameters(StateP state)
{
	state->getRegistry()->registerEntry("nEvaluations", (voidP) (new uint(200)), ECF::UINT);
}

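// Read the configured number of learning iterations and keep the state
// pointer for use during evaluation.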
bool EvalOp::initialize(StateP state)
{
	voidP sptr = state->getRegistry()->getEntry("nEvaluations");
	net.nEvaluations = *((uint*) sptr.get());

	state_ = state;

	return true;
}

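// Evaluate an individual: use its tree as the network's weight-update rule,
// train the randomly initialized network with it and return the remaining
// network error as a minimizing fitness.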
FitnessP EvalOp::evaluate(IndividualP individual)
{
	FitnessP fitness (new FitnessMin);

	// the individual's genotype is a single GP tree (the learning rule)
	Tree::Tree* tree = (Tree::Tree*) individual->getGenotype().get();

	// start from a freshly randomized network and attach the learning rule
	net.initialize(state_);
	net.learningTree = tree;

	// expose the network object through the evaluation context
	state_->getContext()->environment = &net;

	// run the learning process; the remaining error becomes the fitness value
	double error = net.learnNumberOfIterations();

	fitness->setValue(error);

	return fitness;
}