Tesseract 3.02
neuron.cpp
// Copyright 2008 Google Inc.
// All Rights Reserved.
// Author: ahmadab@google.com (Ahmad Abdulkader)
//
// neuron.cpp: The implementation of a class for an object
// that represents a single neuron in a neural network

#include "neuron.h"
#include "input_file_buffer.h"

namespace tesseract {

// Instantiate all supported templates
template bool Neuron::ReadBinary(InputFileBuffer *input_buffer);

// Default and only constructor
Neuron::Neuron() {
  Init();
}

// Virtual destructor
Neuron::~Neuron() {
}

// Initializer
void Neuron::Init() {
  id_ = -1;
  frwd_dirty_ = false;
  fan_in_.clear();
  fan_in_weights_.clear();
  activation_ = 0.0f;
  output_ = 0.0f;
  bias_ = 0.0f;
  node_type_ = Unknown;
}

// Computes the activation and output of the neuron, if not fresh,
// by pulling the outputs of all fan-in neurons
void Neuron::FeedForward() {
  if (!frwd_dirty_) {
    return;
  }
  // Nothing to do for input nodes: just pass the input to the output.
  // Otherwise, pull the output of all fan-in neurons.
  if (node_type_ != Input) {
    int fan_in_cnt = fan_in_.size();
    // sum up the activation
    activation_ = -bias_;
    for (int in = 0; in < fan_in_cnt; in++) {
      if (fan_in_[in]->frwd_dirty_) {
        fan_in_[in]->FeedForward();
      }
      activation_ += ((*(fan_in_weights_[in])) * fan_in_[in]->output_);
    }
    // squash it through the sigmoid
    output_ = Sigmoid(activation_);
  }
  frwd_dirty_ = false;
}

// Sets the type of the neuron
void Neuron::set_node_type(NeuronTypes Type) {
  node_type_ = Type;
}

// Adds new connections *to* this neuron *from* an array of source
// neurons using the specified parameters.
// Note that what is actually copied in this function are pointers to the
// specified Neurons and weights, not the actual values. This is by
// design, to centralize the allocation of neurons and weights and so
// increase the locality of reference and improve cache hits, resulting
// in a faster net. This technique resulted in a 2X-10X speedup
// (depending on network size and processor).
void Neuron::AddFromConnection(Neuron *neurons,
                               float *wts_offset,
                               int from_cnt) {
  for (int in = 0; in < from_cnt; in++) {
    fan_in_.push_back(neurons + in);
    fan_in_weights_.push_back(wts_offset + in);
  }
}

// Fast computation of the sigmoid function using a lookup table
// defined in sigmoid_table.cpp
float Neuron::Sigmoid(float activation) {
  if (activation <= -10.0f) {
    return 0.0f;
  } else if (activation >= 10.0f) {
    return 1.0f;
  } else {
    return kSigmoidTable[static_cast<int>(100 * (activation + 10.0))];
  }
}

}  // namespace tesseract
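For reference, for a non-input neuron FeedForward computes the standard logistic unit: with fan-in outputs x_i and weights w_i,

    activation = sum_i(w_i * x_i) - bias
    output     = 1 / (1 + exp(-activation))

evaluated lazily: a neuron recomputes only when its frwd_dirty_ flag is set, and it recursively refreshes any stale fan-in neuron before reading its output_.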
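The pointer-sharing design behind AddFromConnection is easiest to see with a small driver. The following is a minimal sketch, not part of the Tesseract sources: it assumes a caller that owns all neurons and weights in contiguous arrays, which is exactly what makes storing raw pointers safe and cache-friendly. The layer sizes and weight values are illustrative.

#include <vector>
#include "neuron.h"

// Hypothetical driver: wires a fully connected layer of 2 neurons
// on top of a layer of 4, with all storage allocated centrally.
void BuildLayerSketch() {
  const int kPrevCnt = 4;  // illustrative fan-in layer size
  const int kCurrCnt = 2;  // illustrative current layer size

  // Central allocation: one contiguous block of neurons and one of
  // weights. Each Neuron will only store pointers into these blocks.
  std::vector<tesseract::Neuron> prev(kPrevCnt);
  std::vector<tesseract::Neuron> curr(kCurrCnt);
  std::vector<float> weights(kPrevCnt * kCurrCnt, 0.1f);  // dummy weights

  for (int n = 0; n < kCurrCnt; ++n) {
    // Neuron n reads kPrevCnt consecutive weights starting at its own
    // slice of the shared weight block.
    curr[n].AddFromConnection(&prev[0], &weights[n * kPrevCnt], kPrevCnt);
  }
}

Because fan_in_weights_ holds pointers, updating an entry of the shared weight block in place (for example, when loading a net) is immediately visible to every neuron, with no per-neuron copies.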
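kSigmoidTable itself lives in sigmoid_table.cpp and is not shown here. Given the indexing expression static_cast<int>(100 * (activation + 10.0)), the table must cover activations in (-10, 10) at a resolution of 0.01, i.e. indices 0 through 1999. Below is a sketch of a generator that would produce a compatible table; the exact size and formatting are assumptions, not copied from the sources.

#include <cmath>
#include <cstdio>

// Emits a C array compatible with Neuron::Sigmoid's indexing:
// entry i holds sigmoid(-10.0 + 0.01 * i) for i in [0, 2000).
int main() {
  const int kTableSize = 2000;  // assumed size; covers indices 0..1999
  printf("const float kSigmoidTable[%d] = {\n", kTableSize);
  for (int i = 0; i < kTableSize; ++i) {
    const double activation = -10.0 + 0.01 * i;
    const double sig = 1.0 / (1.0 + exp(-activation));
    printf("  %.8ff,\n", sig);
  }
  printf("};\n");
  return 0;
}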