summaryrefslogtreecommitdiffstats
path: root/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src
diff options
context:
space:
mode:
authorSven Eisenhauer <sven@sven-eisenhauer.net>2023-11-10 15:11:48 +0100
committerSven Eisenhauer <sven@sven-eisenhauer.net>2023-11-10 15:11:48 +0100
commit33613a85afc4b1481367fbe92a17ee59c240250b (patch)
tree670b842326116b376b505ec2263878912fca97e2 /Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src
downloadStudium-master.tar.gz
Studium-master.tar.bz2
add new repoHEADmaster
Diffstat (limited to 'Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src')
-rw-r--r--Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLP.cpp312
-rw-r--r--Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLP.h64
-rw-r--r--Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLPConfig.cpp223
-rw-r--r--Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLPConfig.h164
-rw-r--r--Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/main.cpp401
5 files changed, 1164 insertions, 0 deletions
diff --git a/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLP.cpp b/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLP.cpp
new file mode 100644
index 0000000..e7abe3b
--- /dev/null
+++ b/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLP.cpp
@@ -0,0 +1,312 @@
+/*
+ * MLP.cpp
+ *
+ * Created on: 09.06.2011
+ * Author: sven
+ */
+#include <boost/foreach.hpp>
+#include <boost/cstdlib.hpp>
+#include <iostream>
+#include <iomanip>
+#include <cmath>
+#include <limits>
+#include <sstream>
+#include "MLP.h"
+
+// Construct the network from a parsed configuration.
+// Neuron indexing: 0 is the always-on bias neuron, [1,mStartHidden) the
+// inputs, [mStartHidden,mStartOutput) the hidden layer and
+// [mStartOutput,mNoNeuron) the outputs.
+MLP::MLP(const MLPConfig& mlpConf)
+:mStartHidden(1+mlpConf.getNumInputNeurons()),mStartOutput(mStartHidden+mlpConf.getNumHiddenNeurons())
+,mNoNeuron(mStartOutput + mlpConf.getNumOutputNeurons())
+,mActivity(mNoNeuron), mDelta(mNoNeuron)
+,mMomentum(mlpConf.getMomentum())
+,mLernrate(mlpConf.getLernrate()),mConfig(mlpConf)
+// BUGFIX: mOldError was never initialized but train() compares against it
+// on the first epoch; start at +inf so the comparison is well defined.
+,mOldError(std::numeric_limits<double>::infinity())
+,mTrainSuccess(false),mDoStop(false)
+{
+ mWeights = mlpConf.getWeights();
+ // Square delta/update matrices sized [mNoNeuron][mNoNeuron], zeroed.
+ for (uint32_t i = 0; i < mNoNeuron ; i++) {
+ mDeltaWeights.push_back(vector<double>(mNoNeuron));
+ mOldUpdate.push_back(vector<double>(mNoNeuron));
+ }
+ // Bias neuron is always on
+ mActivity.at(0) = 1.0;
+}
+
+MLP::~MLP()
+{
+
+}
+
+void MLP::dump()
+{
+ int width = 12;
+ int prec = 8;
+ cout << "============ MLP config==========" << endl;
+ cout << "start hidden: " << mStartHidden << endl;
+ cout << "start output: " << mStartOutput << endl;
+ cout << "no neuron: " << mNoNeuron << endl;
+
+ cout << "Weights:" << endl;
+ BOOST_FOREACH(vector<double> i, mWeights)
+ {
+ BOOST_FOREACH(double w, i)
+ {
+ cout << setw(width) << fixed << setprecision(prec) << w;
+ }
+ cout << endl;
+ }
+ cout << "Deltaweights:" << endl;
+ BOOST_FOREACH(vector<double> i, mDeltaWeights)
+ {
+ BOOST_FOREACH(double w, i)
+ {
+ cout << setw(width) << setprecision(prec) << w;
+ }
+ cout << endl;
+ }
+ cout << "OldUpdate:" << endl;
+ BOOST_FOREACH(vector<double> i, mOldUpdate)
+ {
+ BOOST_FOREACH(double w, i)
+ {
+ cout << setw(width) << setprecision(prec) << w;
+ }
+ cout << endl;
+ }
+ cout << "Activity:" << endl;
+ BOOST_FOREACH(double w, mActivity)
+ {
+ cout << setw(width) << setprecision(prec) << w;
+ }
+ cout << endl;
+ cout << "=================================" << endl;
+}
+
+// Logistic activation function: maps any real input into (0, 1).
+double MLP::sigmoid(const double& a)
+{
+ return 1.0 / (1.0 + exp(-a));
+}
+
+// Forward pass: load `pattern` onto the input neurons, then compute the
+// sigmoid activations of the hidden and output layers in turn.
+// Terminates the process if the pattern size does not match the input layer.
+void MLP::propagate(const Pattern& pattern)
+{
+ uint32_t i,j;
+ double activation = 0.0;
+ // mStartHidden - 1 == number of input neurons (index 0 is the bias).
+ if (pattern.size() != (mStartHidden - 1) ) {
+ cerr << "Pattern does not match input neurons: " << pattern.size() << " != " << (mStartHidden - 1) << endl;
+ exit(1);
+ }
+ // Copy the pattern onto the input neurons; neuron 0 stays the bias.
+ for (i=1 ; i < mStartHidden ; i++) {
+ mActivity.at(i) = pattern.at(i - 1);
+ }
+ // Hidden layer: weighted sum of bias plus all input activities.
+ for (i=mStartHidden ; i<mStartOutput ; i++) {
+ activation = mActivity.at(0) * mWeights.at(0).at(i);
+ for (j=1 ; j<mStartHidden ; j++) {
+ activation += mActivity.at(j) * mWeights.at(j).at(i);
+ }
+ mActivity.at(i) = sigmoid(activation);
+ }
+ // Output layer: weighted sum of bias plus all hidden activities.
+ for (i=mStartOutput ; i < mNoNeuron ; i++) {
+ activation = mActivity.at(0) * mWeights.at(0).at(i);
+ for (j=mStartHidden ; j<mStartOutput ; j++) {
+ activation += mActivity.at(j) * mWeights.at(j).at(i);
+ }
+ mActivity.at(i) = sigmoid(activation);
+ }
+}
+// Backward pass for a single pattern: computes the error terms (deltas)
+// for the output and hidden layers and accumulates the weight gradients
+// into mDeltaWeights. propagate() must have run for the same pattern
+// first; `target` holds one value per output neuron.
+void MLP::back_propagate(const Target& target)
+{
+ uint32_t i,j;
+ // injected error: delta_o = -(t - a) * a * (1 - a)  (sigmoid derivative)
+ for (i=mStartOutput ; i<mNoNeuron ; i++ ) {
+ mDelta.at(i) = (-1.0) * (target.at( i-mStartOutput ) - mActivity.at(i) );
+ mDelta.at(i) *= mActivity.at(i) * ( 1.0 - mActivity.at(i));
+ }
+
+ // implicit error: hidden delta is the weighted sum of the output deltas
+ for (i=mStartHidden ; i<mStartOutput ; i++) {
+ for ( j=mStartOutput , mDelta.at(i)=0 ; j<mNoNeuron ; j++) {
+ mDelta.at(i) += mWeights.at(i).at(j) * mDelta.at(j);
+ }
+ mDelta.at(i) *= mActivity.at(i) * ( 1.0 - mActivity.at(i) );
+ }
+
+ // bias weight deltas (mActivity.at(0) is the constant 1.0 bias)
+ for (j=mStartHidden ; j<mNoNeuron ; j++) {
+ mDeltaWeights.at(0).at(j) += (mActivity.at(0) * mDelta.at(j));
+ }
+
+ // input to hidden weight deltas
+ for (i=1 ; i<mStartHidden ; i++) {
+ for(j=mStartHidden;j < mStartOutput;j++) {
+ mDeltaWeights.at(i).at(j) += (mActivity.at(i) * mDelta.at(j));
+ }
+ }
+
+ // hidden to output weight deltas
+ for(i=mStartHidden;i < mStartOutput;i++) {
+ for(j=mStartOutput;j < mNoNeuron;j++) {
+ mDeltaWeights.at(i).at(j) += (mActivity.at(i) * mDelta.at(j));
+ }
+ }
+}
+
+// Apply the accumulated gradients with learning rate and momentum:
+//   w += -lernrate * dW + momentum * previous_update
+// Snapshots the pre-update weights into mOldWeights (used when training
+// is aborted or the error rises) and zeroes the gradient accumulator.
+void MLP::update_weight()
+{
+ uint32_t i,j;
+ double update;
+ mOldWeights = mWeights;
+ for(i=0;i < mNoNeuron;i++) {
+ for(j=0;j < mNoNeuron;j++) {
+ update = (-1.) * mLernrate * mDeltaWeights.at(i).at(j);
+ update += mMomentum * mOldUpdate.at(i).at(j);
+ mWeights.at(i).at(j) += update;
+ mOldUpdate.at(i).at(j) = update;
+ mDeltaWeights.at(i).at(j) = 0.0;
+ }
+ }
+}
+
+void MLP::reset_delta()
+{
+ for (uint32_t i=0 ; i < mNoNeuron ; i++) {
+ mDelta.at(i) = 0.0;
+ }
+}
+
+// Train with backpropagation for at most numCycles epochs.
+//   traindata       patterns/targets used for the weight updates
+//   valdata         held-out data used to compute the stopping error
+//   outputfilename  file the accepted weight matrix is written to
+// Stops early when the validation error drops below the configured
+// threshold, when the error starts rising again while already low
+// (then the previous epoch's weights are kept), or when stop() was
+// called from another thread (e.g. the SIGINT handler).
+void MLP::train(const Traindata & traindata, const Traindata& valdata, const uint32_t numCycles, const std::string& outputfilename)
+{
+ Traindata::const_iterator it;
+ double error = numeric_limits<double>::infinity();
+ for (uint32_t i = 0; i < numCycles ; i++) {
+ it = traindata.begin();
+ for( ; it != traindata.end() ; it++) {
+ propagate((*it).mPattern);
+ back_propagate((*it).mTarget);
+ // single mode: apply the deltas after every pattern
+ if (mConfig.getUpdateMode() == MLPConfig::UPDATE_MODE_SINGLE) {
+ update_weight();
+ reset_delta();
+ }
+ // external stop request: persist the previous weights so the
+ // training session is not lost
+ if (isStop()) {
+ ostringstream oss;
+ oss << "mlp_lastconfig_" << mOldError << ".txt";
+ writeWeightsToFile(oss.str(),mOldWeights);
+ return;
+ }
+ }
+ // batch mode: apply the accumulated deltas once per epoch
+ if (mConfig.getUpdateMode() == MLPConfig::UPDATE_MODE_BATCH) {
+ update_weight();
+ reset_delta();
+ }
+ error = validate(valdata);
+ // NOTE(review): the i>0 guard means an error below the threshold in
+ // the very first epoch is never detected, and the same "Error:" line
+ // is printed twice on convergence — confirm both are intended.
+ if (i>0) {
+ cout << "Error: " << error << " cycles: " << (i+1) << endl;
+ if (error < mConfig.getErrorThreshold()) {
+ cout << "Error: " << error << " cycles: " << (i+1) << endl;
+ writeWeightsToFile(outputfilename,mWeights);
+ mTrainSuccess = true;
+ return;
+ }
+ // error increases on a low level
+ // NOTE(review): mOldError is read here before this loop has ever
+ // assigned it — verify it is initialized in the constructor.
+ if ( (error < mConfig.getConfigAcceptanceErrorThreshold() ) && (mOldError < error) ) {
+ cout << "Olderror: " << mOldError << " Error: " << error << " cycles: " << (i+1) << endl;
+ writeWeightsToFile(outputfilename,mOldWeights);
+ mTrainSuccess = true;
+ return;
+ }
+ mOldError = error;
+ }
+ }
+ // no convergence within numCycles: dump the final weights anyway
+ if (!mTrainSuccess) {
+ ostringstream oss;
+ oss << "mlp_lastconfig_" << error << ".txt";
+ writeWeightsToFile(oss.str(),mWeights);
+ }
+}
+
+// Sum of squared output errors over the whole validation set.
+// Returns +infinity for an empty set so that the caller's threshold
+// checks can never accept an unvalidated network.
+double MLP::validate(const Traindata & valdata) {
+ double error = 0.0;
+ if (valdata.empty()) {
+ return numeric_limits<double>::infinity();
+ }
+ uint32_t j;
+ Traindata::const_iterator it = valdata.begin();
+ for( ; it != valdata.end() ; it++) {
+ propagate((*it).mPattern);
+ for(j=mStartOutput;j < mNoNeuron;j++)
+ {
+ error += pow(((*it).mTarget.at(j - mStartOutput) - mActivity.at(j)),2);
+ }
+ }
+ return error;
+}
+
+// Serialize the network topology and the given weight matrix to a text
+// file in the same "section: indices" / "from to weight" format that
+// MLPConfig::parseLine() reads back in.
+// NOTE(review): the stream is not checked with is_open() — a failed open
+// silently discards all output; confirm that is acceptable here.
+void MLP::writeWeightsToFile(const string& filename, const Weights& weights)
+{
+ ofstream outFile;
+ outFile.open(filename.c_str(),ios::out);
+ uint32_t i,j;
+
+ // Topology header: neuron index lists per layer.
+ outFile << "Bias: 0" << endl;
+ outFile << "Input:";
+ for (i=1; i<mStartHidden ; i++) {
+ outFile << " " << i;
+ }
+ outFile << endl;
+
+ outFile << "Hidden:";
+ for (i=mStartHidden; i<mStartOutput ; i++) {
+ outFile << " " << i;
+ }
+ outFile << endl;
+
+ outFile << "Output:";
+ for (i=mStartOutput; i<mNoNeuron ; i++) {
+ outFile << " " << i;
+ }
+ outFile << endl;
+
+ // Bias weights to every hidden and output neuron.
+ outFile << "Threshold" << endl;
+ for ( i=mStartHidden ; i < mNoNeuron ; i++ ) {
+ outFile << "0 " << i << " " << scientific << weights.at(0).at(i) << endl;
+ }
+ outFile << "Input -> Hidden" << endl;
+ for (i=1; i<mStartHidden; i++) {
+ for (j=mStartHidden; j<mStartOutput; j++) {
+ outFile << i << " " << j << " " << scientific << weights.at(i).at(j) << endl;
+ }
+ }
+ outFile << "Hidden -> Output" << endl;
+ for (i=mStartHidden; i<mStartOutput; i++) {
+ for (j=mStartOutput; j<mNoNeuron; j++) {
+ outFile << i << " " << j << " " << scientific << weights.at(i).at(j) << endl;
+ }
+ }
+ outFile.close();
+}
+
+// Public forward pass: run the network on `pattern` and append the
+// output-layer activities to `result` (result is not cleared here).
+void MLP::propagate(const Pattern &pattern, Output &result)
+{
+ propagate(pattern);
+ for (uint32_t idx = mStartOutput; idx < mNoNeuron; ++idx) {
+ result.push_back(mActivity.at(idx));
+ }
+}
+
+// Thread-safe request that a running train() loop terminate at the next
+// pattern boundary. The RAII scoped_lock releases the mutex on scope exit.
+void MLP::stop()
+{
+ boost::mutex::scoped_lock lock(mMutex);
+ mDoStop = true;
+}
+
+// Thread-safe poll of the stop flag set by stop().
+bool MLP::isStop()
+{
+ boost::mutex::scoped_lock lock(mMutex);
+ return mDoStop;
+}
diff --git a/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLP.h b/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLP.h
new file mode 100644
index 0000000..fdb32a3
--- /dev/null
+++ b/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLP.h
@@ -0,0 +1,64 @@
+/*
+ * MLP.h
+ *
+ * Created on: 09.06.2011
+ * Author: sven
+ */
+
+#ifndef MLP_H_
+#define MLP_H_
+
+#include "MLPConfig.h"
+#include <boost/thread.hpp>
+
+typedef vector<double> Activity;
+typedef vector<double> Delta;
+typedef vector<double> Pattern;
+typedef vector<double> Target;
+typedef vector<double> Output;
+
+struct Trainingpair {
+ Pattern mPattern;
+ Target mTarget;
+};
+
+typedef vector<Trainingpair> Traindata;
+
+// Three-layer perceptron (input/hidden/output) with sigmoid activations,
+// trained by backpropagation with momentum. Topology and initial weights
+// come from an MLPConfig.
+class MLP {
+public:
+ MLP(const MLPConfig&);
+ virtual ~MLP();
+ // Train on traindata, validate on valdata, at most numCycles epochs;
+ // the accepted weight matrix is written to the given file name.
+ void train(const Traindata&, const Traindata&, const uint32_t, const std::string&);
+ // Forward pass; appends the output-layer activities to result.
+ void propagate(const Pattern&, Output& result);
+ // Thread-safe request to abort a running train() loop.
+ void stop();
+
+private:
+ uint32_t mStartHidden; // index of the first hidden neuron (index 0 is the bias)
+ uint32_t mStartOutput; // index of the first output neuron
+ uint32_t mNoNeuron; // total neuron count: bias + input + hidden + output
+ Weights mWeights; // weight matrix, indexed [from][to]
+ Activity mActivity; // current activation of every neuron
+ Delta mDelta; // per-neuron error terms from back_propagate()
+ Weights mDeltaWeights; // accumulated weight gradients
+ Weights mOldUpdate; // previous update, used for the momentum term
+ double mMomentum;
+ double mLernrate;
+ const MLPConfig& mConfig;
+ double mOldError; // validation error of the previous epoch
+ Weights mOldWeights; // weights before the most recent update
+ bool mTrainSuccess;
+ bool mDoStop; // set by stop(), polled by isStop()
+ boost::mutex mMutex; // guards mDoStop
+
+ double sigmoid(const double&);
+ void propagate(const Pattern&);
+ void back_propagate(const Target&);
+ void update_weight();
+ void reset_delta();
+ double validate(const Traindata&);
+ void dump();
+ void writeWeightsToFile(const string&, const Weights&);
+ bool isStop();
+};
+
+#endif /* MLP_H_ */
diff --git a/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLPConfig.cpp b/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLPConfig.cpp
new file mode 100644
index 0000000..067a0d8
--- /dev/null
+++ b/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLPConfig.cpp
@@ -0,0 +1,223 @@
+/*
+ * MLPConfig.cpp
+ *
+ * Created on: 09.06.2011
+ * Author: sven
+ */
+
+#include "MLPConfig.h"
+
+#include <iostream>
+#include <string>
+#include <sstream>
+#include <iomanip>
+#include <boost/tokenizer.hpp>
+#include <boost/foreach.hpp>
+#include <time.h>
+
+typedef tokenizer<boost::char_separator<char> > MLP_Tokenizer;
+const uint32_t DEFAULT_NUMCYCLES = 20000U;
+const double DEFAULT_ERROR_THRESHOLD = 0.01;
+const double DEFAULT_CONFIGACCEPTANCE_THRESHOLD = 0.3;
+
+MLPConfig::MLPConfig()
+:mNumInputNeurons(0),mNumHiddenNeurons(0),mNumOutputNeurons(0),mValid(false)
+,mErrorThreshold(DEFAULT_ERROR_THRESHOLD),mNumTrainingCycles(DEFAULT_NUMCYCLES)
+,mConfigAcceptanceErrorThreshold(DEFAULT_CONFIGACCEPTANCE_THRESHOLD)
+{
+
+}
+// Construct from a configuration file.
+// BUGFIX: mConfigAcceptanceErrorThreshold was left uninitialized here,
+// unlike in the default constructor — initialize it the same way.
+MLPConfig::MLPConfig(const char* configFileName)
+:mNumInputNeurons(0),mNumHiddenNeurons(0),mNumOutputNeurons(0),mValid(false)
+,mErrorThreshold(DEFAULT_ERROR_THRESHOLD),mNumTrainingCycles(DEFAULT_NUMCYCLES)
+,mConfigAcceptanceErrorThreshold(DEFAULT_CONFIGACCEPTANCE_THRESHOLD)
+{
+ parseConfigfile(configFileName);
+}
+
+MLPConfig::~MLPConfig() {
+ // TODO Auto-generated destructor stub
+}
+// Parse the already-open stream line by line; returns false as soon as
+// a line cannot be parsed.
+bool MLPConfig::parseConfigfile(ifstream& configFile)
+{
+ string line;
+ // Loop on getline() itself: the old while(good()) pattern also handed
+ // the failed final read to parseLine().
+ while( getline(configFile,line) )
+ {
+ if( !parseLine(line) )
+ {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Parse one line of the weight-file format written by
+// MLP::writeWeightsToFile(): section headers ("Input: 1 2 ...") set the
+// layer sizes by counting indices, and "from to weight" triples fill the
+// weight matrix. Returns true when the line was consumed.
+// NOTE(review): tokens.at(1) and the three weightTok dereferences are not
+// bounds-checked — a malformed line throws or dereferences the end
+// iterator; confirm the input is always trusted output of this program.
+bool MLPConfig::parseLine(string& line)
+{
+ vector<string> tokens;
+ char_separator<char> sep(":");
+ MLP_Tokenizer tok(line,sep);
+ MLP_Tokenizer::iterator it;
+ BOOST_FOREACH(string t, tok)
+ {
+ tokens.push_back(t);
+ }
+ // Blank lines and pure section markers carry no data.
+ if (tokens.empty()) {
+ return true;
+ }
+ if (tokens.at(0).find("Bias") != string::npos) {
+ return true;
+ }
+ if (tokens.at(0).find("Threshold") != string::npos) {
+ return true;
+ }
+ if (tokens.at(0).find("Input -> Hidden") != string::npos) {
+ return true;
+ }
+ if (tokens.at(0).find("Hidden -> Output") != string::npos) {
+ return true;
+ }
+ // Layer headers: the neuron count is the number of listed indices.
+ if (tokens.at(0).find("Input") != string::npos) {
+ MLP_Tokenizer inpTok(tokens.at(1));
+ for (it = inpTok.begin() ;it != inpTok.end() ; ++it) {
+ mNumInputNeurons++;
+ }
+ return true;
+ }
+ if (tokens.at(0).find("Hidden") != string::npos) {
+ MLP_Tokenizer hiddenTok(tokens.at(1));
+ for (it = hiddenTok.begin() ;it != hiddenTok.end() ; ++it) {
+ mNumHiddenNeurons++;
+ }
+ return true;
+ }
+ if (tokens.at(0).find("Output") != string::npos) {
+ MLP_Tokenizer outputTok(tokens.at(1));
+ for (it = outputTok.begin() ;it != outputTok.end() ; ++it) {
+ mNumOutputNeurons++;
+ }
+ return true;
+ }
+ // First weight line: the layer sizes are now known, allocate the matrix.
+ if (mWeights.empty()) {
+ initWeights(false);
+ }
+ // "from to weight" triple.
+ char_separator<char> weightSep(" ");
+ MLP_Tokenizer weightTok(tokens.at(0),weightSep);
+ it = weightTok.begin();
+ uint32_t from = toUint32(*it);
+ it++;
+ uint32_t to = toUint32(*it);
+ it++;
+ double w = toDouble(*it);
+ mWeights.at(from).at(to) = w;
+ return true;
+}
+
+// Allocate the square weight matrix for the configured topology.
+// With randomWeights == true, every feed-forward connection (bias->hidden,
+// bias->output, input->hidden, hidden->output) gets a uniform random
+// weight in [-0.5, 0.5); otherwise all weights stay 0 (they are then
+// filled in by parseLine()).
+// NOTE(review): srand(time(NULL)) reseeds on every call — two calls
+// within the same second produce identical weights; consider seeding once.
+void MLPConfig::initWeights(bool randomWeights)
+{
+ uint32_t i,j;
+ uint32_t numNeurons = 1 /*Bias*/ + mNumInputNeurons + mNumHiddenNeurons + mNumOutputNeurons;
+ for (i = 0; i< numNeurons; i++) {
+ mWeights.push_back(vector<double>(numNeurons));
+ }
+ double randWeight = 0.0;
+ if (randomWeights) {
+ srand(time(NULL));
+ uint32_t startHidden = 1 + mNumInputNeurons;
+ uint32_t startOutput = startHidden + mNumHiddenNeurons;
+ uint32_t noNeuron = startOutput + mNumOutputNeurons;
+ // Bias -> Hidden
+ for (i = startHidden; i < startOutput; i++) {
+ randWeight = getRandomWeight();
+ // out-of-range values are only reported, never rejected
+ if ( (randWeight < -0.5) || (randWeight > 0.5) ) {
+ cout << randWeight << endl;
+ }
+ mWeights.at(0).at(i) = randWeight;
+ }
+ // Bias -> Output
+ for (i = startOutput; i < noNeuron; i++) {
+ randWeight = getRandomWeight();
+ if ( (randWeight < -0.5) || (randWeight > 0.5) ) {
+ cout << randWeight << endl;
+ }
+ mWeights.at(0).at(i) = randWeight;
+ }
+ // Input -> Hidden
+ for (i=1; i < startHidden; i++) {
+ for (j=startHidden ; j<startOutput; j++) {
+ randWeight = getRandomWeight();
+ if ( (randWeight < -0.5) || (randWeight > 0.5) ) {
+ cout << randWeight << endl;
+ }
+ mWeights.at(i).at(j) = randWeight;
+ }
+ }
+ // Hidden -> Output
+ for (i=startHidden; i < startOutput; i++) {
+ for (j=startOutput ; j<noNeuron; j++) {
+ randWeight = getRandomWeight();
+ if ( (randWeight < -0.5) || (randWeight > 0.5) ) {
+ cout << randWeight << endl;
+ }
+ mWeights.at(i).at(j) = randWeight;
+ }
+ }
+ }
+}
+
+// Parse a token as an unsigned 32-bit integer.
+// BUGFIX: res was uninitialized, so a failed extraction returned an
+// indeterminate value; it now returns 0 on failure.
+uint32_t MLPConfig::toUint32(const string& t)
+{
+ uint32_t res = 0;
+ istringstream ss(t);
+ ss >> res;
+ return res;
+}
+
+// Parse a token as a double.
+// BUGFIX: res was uninitialized, so a failed extraction returned an
+// indeterminate value; it now returns 0.0 on failure.
+double MLPConfig::toDouble(const string& t)
+{
+ double res = 0.0;
+ istringstream ss(t);
+ ss >> res;
+ return res;
+}
+
+void MLPConfig::dump()
+{
+ int width = 12;
+ int prec = 8;
+ cout << "============ MLP config==========" << endl;
+ cout << "Lernrate: " << mLernrate << endl;
+ cout << "Momentum: " << mMomentum << endl;
+ cout << "Updatemode: " << (mUpdateMode==UPDATE_MODE_BATCH?"batch":"single") << endl;
+ cout << "Input neurons: " << mNumInputNeurons << endl;
+ cout << "Hidden neurons: " << mNumHiddenNeurons << endl;
+ cout << "Output neurons: " << mNumOutputNeurons << endl;
+
+ cout << "Weights:" << endl;
+ BOOST_FOREACH(vector<double> i, mWeights)
+ {
+ BOOST_FOREACH(double w, i)
+ {
+ cout << setw(width) << setprecision(prec) << w;
+ }
+ cout << endl;
+ }
+ cout << "=================================" << endl;
+}
+// Open and parse the named configuration file; records the outcome in
+// mValid and returns it.
+bool MLPConfig::parseConfigfile(const char* configfilename)
+{
+ ifstream inFile(configfilename);
+ if (inFile.is_open()) {
+ mValid = parseConfigfile(inFile);
+ inFile.close();
+ } else {
+ mValid = false;
+ }
+ return mValid;
+}
+// Uniform pseudo-random weight in [-0.5, 0.5) with 1/10000 resolution.
+double MLPConfig::getRandomWeight()
+{
+ const int prec = 10000;
+ int sample = (rand() % prec) - prec / 2;
+ return static_cast<double>(sample) / static_cast<double>(prec);
+}
diff --git a/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLPConfig.h b/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLPConfig.h
new file mode 100644
index 0000000..0d7cd21
--- /dev/null
+++ b/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/MLPConfig.h
@@ -0,0 +1,164 @@
+/*
+ * MLPConfig.h
+ *
+ * Created on: 09.06.2011
+ * Author: sven
+ */
+
+#ifndef MLPCONFIG_H_
+#define MLPCONFIG_H_
+
+#include <boost/cstdint.hpp>
+#include <vector>
+#include <fstream>
+
+using namespace std;
+using namespace boost;
+
+typedef vector< vector<double> > Weights;
+
+class MLPConfig {
+public:
+ MLPConfig();
+ MLPConfig(const char* configfile);
+ virtual ~MLPConfig();
+
+ enum UPDATE_MODE
+ {
+ UPDATE_MODE_SINGLE,
+ UPDATE_MODE_BATCH
+ };
+ enum OUTPUT_TRANSFER_FUNC
+ {
+ OUTPUT_TRANSFER_FUNC_NORMAL
+ };
+
+ double getLernrate() const
+ {
+ return mLernrate;
+ }
+
+ double getMomentum() const
+ {
+ return mMomentum;
+ }
+
+ uint32_t getNumHiddenNeurons() const
+ {
+ return mNumHiddenNeurons;
+ }
+
+ uint32_t getNumInputNeurons() const
+ {
+ return mNumInputNeurons;
+ }
+
+ uint32_t getNumOutputNeurons() const
+ {
+ return mNumOutputNeurons;
+ }
+
+ OUTPUT_TRANSFER_FUNC getOutputTransferFunc() const
+ {
+ return mOutputTransferFunc;
+ }
+
+ UPDATE_MODE getUpdateMode() const
+ {
+ return mUpdateMode;
+ }
+
+ void setLernrate(double mLernrate)
+ {
+ this->mLernrate = mLernrate;
+ }
+
+ void setMomentum(double mMomentum)
+ {
+ this->mMomentum = mMomentum;
+ }
+
+ void setNumHiddenNeurons(uint32_t mNumHiddenNeurons)
+ {
+ this->mNumHiddenNeurons = mNumHiddenNeurons;
+ }
+
+ void setNumInputNeurons(uint32_t mNumInputNeurons)
+ {
+ this->mNumInputNeurons = mNumInputNeurons;
+ }
+
+ void setNumOutputNeurons(uint32_t mNumOutputNeurons)
+ {
+ this->mNumOutputNeurons = mNumOutputNeurons;
+ }
+
+ void setOutputTransferFunc(OUTPUT_TRANSFER_FUNC mOutputTransferFunc)
+ {
+ this->mOutputTransferFunc = mOutputTransferFunc;
+ }
+
+ void setUpdateMode(UPDATE_MODE mUpdateMode)
+ {
+ this->mUpdateMode = mUpdateMode;
+ }
+
+ bool isValid(){
+ return mValid;
+ }
+
+ const Weights& getWeights() const {
+ return mWeights;
+ }
+
+ const double& getErrorThreshold() const {
+ return mErrorThreshold;
+ }
+
+ void setErrorThreshold(double e) {
+ mErrorThreshold = e;
+ }
+
+ const double& getConfigAcceptanceErrorThreshold() const {
+ return mConfigAcceptanceErrorThreshold;
+ }
+
+ void setConfigAcceptanceErrorThreshold(double e) {
+ mConfigAcceptanceErrorThreshold = e;
+ }
+
+ uint32_t getNumTrainingCycles() const {
+ return mNumTrainingCycles;
+ }
+
+ void setNumTrainingCycles(uint32_t c)
+ {
+ mNumTrainingCycles=c;
+ }
+
+ void dump();
+ void initWeights(bool);
+ bool parseConfigfile(const char*);
+
+private:
+ uint32_t mNumInputNeurons;
+ uint32_t mNumHiddenNeurons;
+ uint32_t mNumOutputNeurons;
+ UPDATE_MODE mUpdateMode;
+ OUTPUT_TRANSFER_FUNC mOutputTransferFunc;
+ double mLernrate;
+ double mMomentum;
+ Weights mWeights;
+ bool mValid;
+ double mErrorThreshold;
+ uint32_t mNumTrainingCycles;
+ double mConfigAcceptanceErrorThreshold;
+
+ bool parseConfigfile(ifstream&);
+ bool parseLine(string&);
+ uint32_t toUint32(const string&);
+ double toDouble(const string &);
+ double getRandomWeight();
+};
+
+#endif /* MLPCONFIG_H_ */
diff --git a/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/main.cpp b/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/main.cpp
new file mode 100644
index 0000000..6980f5f
--- /dev/null
+++ b/Master/Modellbildung_und_Simulation/Aufgabenblatt3/MLP/src/main.cpp
@@ -0,0 +1,401 @@
+/*
+ * main.cpp
+ *
+ * Created on: 09.06.2011
+ * Author: sven
+ */
+
+#include "MLPConfig.h"
+#include "MLP.h"
+
+#include <boost/foreach.hpp>
+#include <boost/program_options.hpp>
+#include <boost/tokenizer.hpp>
+#include <string>
+#include <vector>
+#include <iostream>
+#include <cstdlib>
+#include <cmath>
+#include <csignal>
+
+namespace po = boost::program_options;
+
+const char* MLP_XOR_CONFIGFILE = "Init_MLP.txt";
+const char* MLP_PCA_TRAINFILE = "train_pca";
+const char* MLP_PCA_TESTFILE = "test_pca";
+const char* MLP_RAW_TRAINFILE = "train_raw";
+const char* MLP_RAW_TESTFILE = "test_raw";
+const char* MLP_PCA_CONFIGFILE = "mlp_pca_config.txt";
+const char* MLP_RAW_CONFIGFILE = "mlp_raw_config.txt";
+
+const uint32_t VALIDATION_DATA_RATIO = 20; /* every x th value will become a validition pattern */
+const double EPSILON = 0.001;
+
+void mlp_xor(MLPConfig& config);
+void mlp_train_pca(MLPConfig& config);
+void mlp_train_raw(MLPConfig& config);
+void mlp_test_pca(MLPConfig& config);
+void mlp_test_raw(MLPConfig& config);
+void parseTrainData(const char*,Traindata&,Traindata&);
+void signal_handler(int signal);
+static MLP* mlpPtr = NULL;
+
+// SIGINT handler: asks the currently active MLP (if any) to stop training
+// so its weights get persisted, then clears the global pointer.
+// NOTE(review): MLP::stop() locks a mutex and this handler uses iostreams,
+// neither of which is async-signal-safe — consider setting a
+// volatile sig_atomic_t flag here instead and polling it in train().
+void signal_handler(int signal)
+{
+ std::cout << "Terminating..." << std::endl;
+ if (mlpPtr) {
+ mlpPtr->stop();
+ }
+ mlpPtr = NULL;
+}
+
+// Convert a whitespace-free token to double.
+// BUGFIX: res was uninitialized, so an unparsable token returned an
+// indeterminate value; it now returns 0.0 on failure.
+double toDouble(const std::string& t)
+{
+ double res = 0.0;
+ std::istringstream ss(t);
+ ss >> res;
+ return res;
+}
+
+void parseTrainData(const char* filename, Traindata& td, Traindata& vd)
+{
+ typedef boost::char_separator<char> TrainSepChar;
+ typedef tokenizer<TrainSepChar> TrainTokenizer;
+ TrainSepChar sepCol(":");
+ TrainSepChar sepSpace(" ");
+ ifstream inFile(filename);
+ if (!inFile.is_open()) {
+ std::cerr << "error opening data file " << filename << std::endl;
+ exit(1);
+ }
+ std::string line;
+ uint32_t counter = 0;
+ while(inFile.good())
+ {
+ getline(inFile,line);
+ if (line.empty()) {
+ continue;
+ }
+ TrainTokenizer lineTok(line,sepCol);
+ std::vector<std::string> lineToks;
+ BOOST_FOREACH(string t, lineTok)
+ {
+ lineToks.push_back(t);
+ }
+ Trainingpair tp;
+ TrainTokenizer targetTok(lineToks.at(0),sepSpace);
+ BOOST_FOREACH(string t,targetTok)
+ {
+ tp.mTarget.push_back(toDouble(t));
+ }
+ TrainTokenizer patternTok(lineToks.at(1),sepSpace);
+ BOOST_FOREACH(string t,patternTok)
+ {
+ if (t.find(";") != std::string::npos) {
+ continue;
+ }
+ tp.mPattern.push_back(toDouble(t));
+ }
+ if ( (counter % VALIDATION_DATA_RATIO) != 0 ) {
+ td.push_back(tp);
+ } else {
+ vd.push_back(tp);
+ }
+ counter++;
+ }
+ inFile.close();
+}
+
+void mlp_train_pca(MLPConfig& config)
+{
+ config.setNumInputNeurons(90U);
+ config.setNumHiddenNeurons(90U);
+ config.setNumOutputNeurons(2U);
+ config.initWeights(true);
+ MLP mlp(config);
+ mlpPtr = &mlp;
+ Traindata td;
+ Traindata vd;
+ parseTrainData(MLP_PCA_TRAINFILE,td,vd);
+ mlp.train(td,vd,config.getNumTrainingCycles(),MLP_PCA_CONFIGFILE);
+}
+
+void mlp_test_pca(MLPConfig& config)
+{
+ config.parseConfigfile(MLP_PCA_CONFIGFILE);
+ if (!config.isValid()) {
+ std::cerr << "error parsing config file " << MLP_PCA_CONFIGFILE << std::endl;
+ exit(1);
+ }
+ MLP mlp(config);
+ Traindata td;
+ Output output;
+ parseTrainData(MLP_PCA_TESTFILE,td,td);
+ uint32_t c;
+ uint32_t num = 1;
+ uint32_t errorCount = 0;
+ double out,target;
+ double res[2];
+ BOOST_FOREACH(Trainingpair tp, td) {
+ output.clear();
+ mlp.propagate(tp.mPattern,output);
+ bool errorFound = false;
+ for (c = 0; c < 2; c++) {
+ out = output.at(c);
+ if (out >= 0.5) {
+ res[c] = 1.0;
+ } else {
+ res[c] = 0.0;
+ }
+ }
+ for (c = 0; c < 2; c++) {
+ target = tp.mTarget.at(c);
+ if (std::abs(res[c] - target) > EPSILON) {
+ cout << num << " PCA Testerror: " << res[c] << " != " << target << endl;
+ errorFound = true;
+ } else {
+ cout << num << " PCA OK: " << res[c] << " == " << target << endl;
+ }
+ }
+ if (errorFound) {
+ errorCount++;
+ }
+ num++;
+ }
+ cout << "PCA: " << errorCount << " errors in " << td.size() << " testpatterns" << endl;
+}
+
+void mlp_train_raw(MLPConfig& config)
+{
+ config.setNumInputNeurons(6U*151U);
+ config.setNumHiddenNeurons(6U*151U);
+ config.setNumOutputNeurons(2U);
+ config.initWeights(true);
+ MLP mlp(config);
+ mlpPtr = &mlp;
+ Traindata td;
+ Traindata vd;
+ parseTrainData(MLP_RAW_TRAINFILE,td,vd);
+ mlp.train(td,vd,config.getNumTrainingCycles(),MLP_RAW_CONFIGFILE);
+}
+
+void mlp_test_raw(MLPConfig& config)
+{
+ config.parseConfigfile(MLP_RAW_CONFIGFILE);
+ if (!config.isValid()) {
+ std::cerr << "error parsing config file " << MLP_RAW_CONFIGFILE << std::endl;
+ exit(1);
+ }
+ MLP mlp(config);
+ Traindata td;
+ Output output;
+ parseTrainData(MLP_RAW_TESTFILE,td,td);
+ uint32_t c;
+ uint32_t num = 1;
+ uint32_t errorCount = 0;
+ double out,target;
+ double res[2];
+ BOOST_FOREACH(Trainingpair tp, td) {
+ output.clear();
+ mlp.propagate(tp.mPattern,output);
+ bool errorFound = false;
+ for (c = 0; c < 2; c++) {
+ out = output.at(c);
+ if (out >= 0.5) {
+ res[c] = 1.0;
+ } else {
+ res[c] = 0.0;
+ }
+ }
+ for (c = 0; c < 2; c++) {
+ target = tp.mTarget.at(c);
+ if (std::abs(res[c] - target) > EPSILON) {
+ cout << num << " RAW Testerror: " << res[c] << " != " << target << endl;
+ errorFound = true;
+ } else {
+ cout << num << " RAW OK: " << res[c] << " == " << target << endl;
+ }
+ }
+ if (errorFound) {
+ errorCount++;
+ }
+ num++;
+ }
+ cout << "RAW: " << errorCount << " errors in " << td.size() << " testpatterns" << endl;
+}
+
+void mlp_xor(MLPConfig& config) {
+ MLP mlp(config);
+ uint8_t numPatterns = 4;
+ Traindata td;
+ const double patterns[][2] = {
+ {0.0 , 0.0},
+ {0.0 , 1.0},
+ {1.0 , 0.0},
+ {1.0 , 1.0}
+ };
+ const double targets[][1] = {
+ {0},
+ {1},
+ {1},
+ {0}
+ };
+
+ for (uint8_t i = 0; i < numPatterns ; i++) {
+ Trainingpair tp;
+ tp.mPattern.assign(patterns[i],patterns[i]+2);
+ tp.mTarget.assign(targets[i],targets[i]+1);
+ td.push_back(tp);
+ }
+
+ mlp.train(td,td,config.getNumTrainingCycles(),"mlp_xor_weights.txt");
+
+ // test with other data (Aufgabe 3)
+ int32_t i,j;
+ int32_t minval = -40;
+ int32_t maxval = 50;
+ double x,y;
+ Output result;
+ Pattern test_pat;
+ ofstream csvFile;
+ const char* sep = " ";
+ csvFile.open("mlp_xor_out.dat",ios::out);
+ for (i = minval ; i < maxval ; i++)
+ {
+ x = ((double) i) / 10.0;
+ for (j = minval ; j < maxval ; j++) {
+ y = ((double) j) / 10.0;
+ test_pat.clear();
+ test_pat.push_back(x);
+ test_pat.push_back(y);
+ result.clear();
+ mlp.propagate(test_pat,result);
+ csvFile << x << sep << y;
+ BOOST_FOREACH(double z,result)
+ {
+ csvFile << sep << z;
+ }
+ csvFile << endl;
+ }
+ csvFile << endl;
+ }
+ csvFile.close();
+ system("gnuplot -persist mlp_xor.plt");
+}
+
+// Entry point: parse command-line options into an MLPConfig and dispatch
+// to the selected application (xor demo, PCA/raw training or testing).
+int main(int argc, char* argv[])
+{
+ signal(SIGINT,&signal_handler);
+
+ MLPConfig config;
+
+ enum APP {
+ APP_XOR,APP_TRAIN_RAW,APP_TRAIN_PCA,APP_TEST_RAW,APP_TEST_PCA
+ };
+
+ // default settings
+ config.setLernrate(0.8);
+ config.setUpdateMode(MLPConfig::UPDATE_MODE_BATCH);
+ config.setMomentum(0.9);
+ config.setErrorThreshold(0.01);
+ config.setConfigAcceptanceErrorThreshold(0.1);
+ config.setNumTrainingCycles(20000U);
+ APP app = APP_XOR;
+
+ try {
+ po::options_description desc("Allowed options");
+ desc.add_options()
+ ("help", "produce help message")
+ ("trainingcycles", po::value<uint32_t>(), "set number of training cycles. default 20000")
+ ("lernrate", po::value<double>(), "set lernrate. default 0.8")
+ ("momentum", po::value<double>(), "set momentum. default 0.9")
+ ("maxerror", po::value<double>(), "set maximum error that stops training. default 0.01")
+ ("acceptanceerror", po::value<double>(), "set error after which the configuration is accepted if error increases. default 0.1")
+ ("updatemode", po::value<std::string>(), "set update mode <\"batch\"|\"single\">. default batch")
+ ("app", po::value<std::string>(), "application <\"xor\"|\"trainraw\"|\"trainpca\"|\"testraw\"|\"testpca\">. default xor")
+ ;
+
+ po::variables_map vm;
+ po::store(po::parse_command_line(argc, argv, desc), vm);
+ po::notify(vm);
+
+ if (vm.count("help")) {
+ cout << desc << "\n";
+ return 1;
+ }
+ if (vm.count("trainingcycles")) {
+ uint32_t trainingcycles = vm["trainingcycles"].as<uint32_t>();
+ config.setNumTrainingCycles(trainingcycles);
+ }
+ if (vm.count("lernrate")) {
+ double lernrate = vm["lernrate"].as<double>();
+ config.setLernrate(lernrate);
+ }
+ if (vm.count("momentum")) {
+ double momentum = vm["momentum"].as<double>();
+ config.setMomentum(momentum);
+ }
+ if (vm.count("maxerror")) {
+ double maxerror = vm["maxerror"].as<double>();
+ config.setErrorThreshold(maxerror);
+ }
+ // BUGFIX: this previously checked vm.count("momentum") (copy/paste),
+ // so --acceptanceerror alone was ignored and --momentum alone threw
+ // when "acceptanceerror" was absent from the map.
+ if (vm.count("acceptanceerror")) {
+ double acceptanceerror = vm["acceptanceerror"].as<double>();
+ config.setConfigAcceptanceErrorThreshold(acceptanceerror);
+ }
+ if (vm.count("updatemode")) {
+ std::string updatemode = vm["updatemode"].as<std::string>();
+ if (updatemode.find("batch") != std::string::npos) {
+ config.setUpdateMode(MLPConfig::UPDATE_MODE_BATCH);
+ } else if(updatemode.find("single") != std::string::npos) {
+ config.setUpdateMode(MLPConfig::UPDATE_MODE_SINGLE);
+ }
+ }
+ if (vm.count("app")) {
+ std::string appOpt = vm["app"].as<std::string>();
+ if (appOpt.find("xor") != std::string::npos) {
+ app = APP_XOR;
+ } else if (appOpt.find("trainraw") != std::string::npos) {
+ app = APP_TRAIN_RAW;
+ } else if (appOpt.find("trainpca") != std::string::npos) {
+ app = APP_TRAIN_PCA;
+ } else if (appOpt.find("testraw") != std::string::npos) {
+ app = APP_TEST_RAW;
+ } else if (appOpt.find("testpca") != std::string::npos) {
+ app = APP_TEST_PCA;
+ }
+ }
+ }
+ catch(std::exception& e) {
+ cerr << "error: " << e.what() << "\n";
+ return 1;
+ }
+ catch(...) {
+ // NOTE(review): execution deliberately continues with the defaults
+ // after an unknown exception — confirm that is intended.
+ cerr << "Exception of unknown type!\n";
+ }
+ switch(app) {
+ case APP_XOR:
+ config.parseConfigfile(MLP_XOR_CONFIGFILE);
+ if (!config.isValid()) {
+ std::cerr << "error parsing config file " << MLP_XOR_CONFIGFILE << std::endl;
+ exit(1);
+ }
+ mlp_xor(config);
+ break;
+ case APP_TRAIN_PCA:
+ mlp_train_pca(config);
+ break;
+ case APP_TRAIN_RAW:
+ mlp_train_raw(config);
+ break;
+ case APP_TEST_PCA:
+ mlp_test_pca(config);
+ break;
+ case APP_TEST_RAW:
+ mlp_test_raw(config);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}