/usr/include/root/TNeuron.h is in libroot-math-mlp-dev 5.34.30-0ubuntu8.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
// @(#)root/mlp:$Id$
// Author: Christophe.Delaere@cern.ch 20/07/03
/*************************************************************************
 * Copyright (C) 1995-2003, Rene Brun and Fons Rademakers.               *
 * All rights reserved.                                                  *
 *                                                                       *
 * For the licensing terms see $ROOTSYS/LICENSE.                         *
 * For the list of contributors see $ROOTSYS/README/CREDITS.             *
 *************************************************************************/
#ifndef ROOT_TNeuron
#define ROOT_TNeuron
#ifndef ROOT_TNamed
#include "TNamed.h"
#endif
#ifndef ROOT_TObjArray
#include "TObjArray.h"
#endif
class TTreeFormula;
class TSynapse;
class TBranch;
class TTree;
class TFormula;
//____________________________________________________________________
//
// TNeuron
//
// This class describes an elementary neuron, which is the basic
// element of a neural network.
// A network is built by connecting neurons with synapses.
// There are different types of neurons: linear (a+bx),
// sigmoid (1/(1+exp(-x))), tanh or gaussian.
// In a Multi Layer Perceptron, the input layer is made of
// inactive neurons (returning the normalized input), hidden layers
// are made of sigmoids and output neurons are linear.
//
// This implementation contains several methods to compute the value,
// the derivative, the DeDw, ...
// Values are stored in local buffers. The SetNewEvent() method is
// there to signal that the buffered values are outdated.
//
//____________________________________________________________________
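//
// Illustration only (a sketch, not part of this header): the activation
// types listed above correspond to mappings along the lines of
//
//    #include <cmath>
//    // Hypothetical helper; kGauss as exp(-x*x) is an assumption here.
//    double Activate(double x, TNeuron::ENeuronType type) {
//       switch (type) {
//          case TNeuron::kLinear:  return x; // slope/offset come from weights
//          case TNeuron::kSigmoid: return 1.0 / (1.0 + std::exp(-x));
//          case TNeuron::kTanh:    return std::tanh(x);
//          case TNeuron::kGauss:   return std::exp(-x * x);
//          default:                return x;
//       }
//    }
//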
class TNeuron : public TNamed {
friend class TSynapse;
public:
enum ENeuronType { kOff, kLinear, kSigmoid, kTanh, kGauss, kSoftmax, kExternal };
TNeuron(ENeuronType type = kSigmoid,
const char* name = "", const char* title = "",
const char* extF = "", const char* extD = "" );
virtual ~TNeuron() {}
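// Construction sketch (illustrative; in practice neurons are typically
// created internally by TMultiLayerPerceptron):
//    TNeuron input(TNeuron::kOff, "in0", "first input");
//    TNeuron hidden(TNeuron::kSigmoid, "h0", "hidden unit");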
inline TSynapse* GetPre(Int_t n) const { return (TSynapse*) fpre.At(n); }
inline TSynapse* GetPost(Int_t n) const { return (TSynapse*) fpost.At(n); }
inline TNeuron* GetInLayer(Int_t n) const { return (TNeuron*) flayer.At(n); }
TTreeFormula* UseBranch(TTree*, const char*);
Double_t GetInput() const;
Double_t GetValue() const;
Double_t GetDerivative() const;
Double_t GetError() const;
Double_t GetTarget() const;
Double_t GetDeDw() const;
Double_t GetBranch() const;
ENeuronType GetType() const;
void SetWeight(Double_t w);
inline Double_t GetWeight() const { return fWeight; }
void SetNormalisation(Double_t mean, Double_t RMS);
inline const Double_t* GetNormalisation() const { return fNorm; }
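// Normalisation sketch (assumed convention, matching the fNorm comment
// below: inputs are mapped to mean=0, RMS=1):
//    neuron.SetNormalisation(mean, rms);
//    // an input x is then effectively used as (x - mean) / rms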
void SetNewEvent() const;
void SetDEDw(Double_t in);
inline Double_t GetDEDw() const { return fDEDw; }
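// Buffering sketch (loop structure assumed): GetValue(), GetDeDw(), etc.
// are cached per event until SetNewEvent() flags the buffers as outdated:
//    for (Long64_t i = 0; i < nEvents; ++i) {
//       // ... move the source TTree to entry i ...
//       Double_t v = neuron.GetValue(); // computed once, then buffered
//       neuron.SetNewEvent();           // invalidate buffers for next entry
//    }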
void ForceExternalValue(Double_t value);
void AddInLayer(TNeuron*);
protected:
Double_t Sigmoid(Double_t x) const;
Double_t DSigmoid(Double_t x) const;
void AddPre(TSynapse*);
void AddPost(TSynapse*);
private:
TNeuron(const TNeuron&); // Not implemented
TNeuron& operator=(const TNeuron&); // Not implemented
TObjArray fpre; // pointers to the previous level in a network
TObjArray fpost; // pointers to the next level in a network
TObjArray flayer; // pointers to the current level in a network (neurons, not synapses)
Double_t fWeight; // weight used for computation
Double_t fNorm[2]; // normalisation to mean=0, RMS=1.
ENeuronType fType; // neuron type
TFormula* fExtF; // function (external mode)
TFormula* fExtD; // derivative (external mode)
//buffers
//should be mutable when supported by all compilers
TTreeFormula* fFormula;//! formula to be used for inputs and outputs
Int_t fIndex; //! index in the formula
Bool_t fNewInput; //! do we need to compute fInput again ?
Double_t fInput; //! buffer containing the last neuron input
Bool_t fNewValue; //! do we need to compute fValue again ?
Double_t fValue; //! buffer containing the last neuron output
Bool_t fNewDeriv; //! do we need to compute fDerivative again ?
Double_t fDerivative; //! buffer containing the last neuron derivative
Bool_t fNewDeDw; //! do we need to compute fDeDw again ?
Double_t fDeDw; //! buffer containing the last derivative of the error
Double_t fDEDw; //! buffer containing the sum over all examples of DeDw
ClassDef(TNeuron, 4) // Neuron for MultiLayerPerceptrons
};
#endif