TPerceptron


This template implements a multi-layer perceptron (MLP), i.e. a multi-layered neural network with fully connected hidden layers, trained with the back-propagation learning algorithm (or, more precisely, stochastic gradient descent). You can find more information about using this template in the programming guide article about the Artificial Intelligence module.
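
For orientation, the weight update performed by stochastic gradient descent with momentum and regularization is sketched below. This assumes the conventional meaning of the learn_rate (\eta), momentum_rate (\mu), regularize_rate (\lambda), and batch_size (m) parameters described in the constructor; the template's exact formula may differ in its details.

\Delta w \leftarrow \mu \, \Delta w \;-\; \frac{\eta}{m} \sum_{k=1}^{m} \frac{\partial E_k}{\partial w} \;-\; \eta \lambda \, \frac{\partial R}{\partial w}, \qquad w \leftarrow w + \Delta w

where E_k is the error of the k-th sample in the batch and R is the regularization penalty (applied only when a regularization function is supplied).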

You can find more information in the comments below. Overridden methods can be found in the corresponding base class.

template<class FLOAT>
class TPerceptron
{
public:
    TPerceptron() = default;

    TPerceptron(
        // Required parameters 
        IActivator<FLOAT> *activator, // Hidden layers activation function 
        IActivator<FLOAT> *activator_output, // Output layer activation function 
        IErrorFunction<FLOAT> *error_function, // Error function 
        const size_type feature_count, // Feature/Input count 
        const size_type output_count, // Output/Class count 
        const std::initializer_list<size_type> &hidden_layers, // Number of neurons in hidden layers 
        // Optional parameters 
        IRandomizer<FLOAT> *randomizer = nullptr, // Randomization function. If empty, weights are initialized from a Gaussian distribution with mean = 0 and sigma = 1 / sqrt(weight_count); the same applies to biases, except that sigma = 1 
        IRegularizeFunction<FLOAT> *regularize_function = nullptr, // Regularization function 
        const size_type batch_size = 10, // Learning batch size for Stochastic Gradient Descent. Minimum is 1, which corresponds to online learning 
        const FLOAT learn_rate = 0.03, // Learning rate 
        const FLOAT regularize_rate = 0, // Regularization rate. Used when regularization function is not empty 
        const FLOAT momentum_rate = 0 // Momentum rate 
    );

    bool isValid() const; // Return true if net is valid 
    bool isLearning() const; // Return whether the net is in learning mode 
    size_type getBatchSize() const; // Return batch size 
    FLOAT getLearnRate() const; // Return learning rate 
    FLOAT getRegularizeRate() const; // Return regularize rate 
    FLOAT getMomentumRate() const; // Return momentum rate 
    size_type getInputCount() const; // Return input/feature count (number of neurons in the input layer) 
    FLOAT getInput(const size_type index) const; // Return input value 
    size_type getOutputCount() const; // Return output/class count (number of neurons in the output layer) 
    FLOAT getOutput(const size_type index) const; // Return output 
    size_type getLayerCount() const; // Return layer count 
    size_type getNeuronCount(const size_type layer) const; // Return neuron count in specified layer 
    FLOAT getNeuronBias(const size_type layer, const size_type neuron) const; // Return bias value of specified neuron 
    FLOAT getNeuronAmount(const size_type layer, const size_type neuron) const; // Return neuron weighted sum 
    FLOAT getNeuronOutput(const size_type layer, const size_type neuron) const; // Return neuron output (the result of applying the activation function to the neuron's weighted sum) 
    IActivator<FLOAT> *getNeuronActivator(const size_type layer, const size_type neuron) const; // Return activation function of specified neuron 
    FLOAT getNeuronAccumulatedInputDerivative(const size_type layer, const size_type neuron) const; // Return accumulated input derivative of specified neuron 
    size_type getNeuronAccumulatedInputDerivativeCount(const size_type layer, const size_type neuron) const; // Return accumulated input derivative count of specified neuron 
    bool getNeuronState(const size_type layer, const size_type neuron, FLOAT &bias, FLOAT &amount, FLOAT &output) const; // Return specified neuron bias, weighted sum, and output 
    size_type getWeightCount(const size_type layer, const size_type neuron) const; // Return weight count of specified neuron 
    FLOAT getWeight(const size_type layer, const size_type neuron, const size_type weight) const; // Return specified weight 
    FLOAT getWeightAccumulatedErrorDerivative(const size_type layer, const size_type neuron, const size_type weight) const; // Return accumulated error derivative of specified weight 
    size_type getWeightAccumulatedErrorDerivativeCount(const size_type layer, const size_type neuron, const size_type weight) const; // Return accumulated error derivative count of specified weight 
    bool isWeightDead(const size_type layer, const size_type neuron, const size_type weight) const; // Return whether specified weight is not active 

    bool setLearning(const bool value); // Set learning mode 
    bool setBatchSize(const size_type value); // Set batch size 
    bool setLearnRate(const FLOAT value); // Set learning rate 
    bool setRegularizeRate(const FLOAT value); // Set regularization rate 
    bool setMomentumRate(const FLOAT value); // Set momentum rate 
    bool setInput(const size_type index, const FLOAT value); // Set input value 
    bool setLayerActivator(const size_type layer, IActivator<FLOAT> *value); // Set activation function to entire layer 
    bool setNeuronBias(const size_type layer, const size_type neuron, const FLOAT value); // Set specified neuron bias 
    bool setNeuronActivator(const size_type layer, const size_type neuron, IActivator<FLOAT> *value); // Set specified neuron activation function 
    bool setWeight(const size_type layer, const size_type neuron, const size_type weight, const FLOAT value); // Set specified weight value 
    bool setWeightDead(const size_type layer, const size_type neuron, const size_type weight, const bool value); // Enable/disable specified weight 

    void ResetBatch(); // Reset current batch counter 
    bool Randomize(IRandomizer<FLOAT> *randomizer); // Initialize weights and biases of neurons using specified randomizer 
    bool RandomizeLayer(const size_type layer, IRandomizer<FLOAT> *randomizer); // Initialize weights and biases of neurons of specified layer using specified randomizer 
    bool RandomizeNeuron(const size_type layer, const size_type neuron, IRandomizer<FLOAT> *randomizer); // Initialize weights and bias of the specified neuron using the specified randomizer 

    bool Forward(const std::vector<FLOAT> &inputs); // Forward step 
    bool Backward(const std::vector<FLOAT> &expected_output); // Backward step 
    bool UpdateWeights(); // Update weights (learn) 

    bool Simulate(const std::vector<FLOAT> &inputs, const std::vector<FLOAT> &expected_output); // Main processing method. Performs the forward step, the backward step, and the weight update (if needed) 

    FLOAT Loss(const std::vector<FLOAT> &inputs, const std::vector<FLOAT> &expected_output); // Calculate error 
};
Namespace: nitisa::ai
Include: Nitisa/Modules/AI/Perceptron.h
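
As a usage sketch, the snippet below builds a small network and trains it on the XOR problem. It is illustrative only: CSigmoidActivator and CQuadraticErrorFunction are hypothetical placeholder names for concrete IActivator/IErrorFunction implementations (this reference does not name any), and all hyper-parameter values are arbitrary.

#include <vector>
#include "Nitisa/Modules/AI/Perceptron.h"

using namespace nitisa::ai;

int main()
{
    // Hypothetical stand-ins for whatever IActivator<double> and
    // IErrorFunction<double> implementations your project provides.
    CSigmoidActivator<double> sigmoid;
    CQuadraticErrorFunction<double> quadratic;

    // 2 inputs, 1 output, a single hidden layer of 4 neurons. With no
    // randomizer supplied, weights get the default Gaussian initialization
    // (mean = 0, sigma = 1 / sqrt(weight_count); sigma = 1 for biases).
    TPerceptron<double> net(
        &sigmoid,   // hidden layer activation
        &sigmoid,   // output layer activation
        &quadratic, // error function
        2,          // feature_count
        1,          // output_count
        { 4 },      // hidden_layers
        nullptr,    // randomizer: use the default
        nullptr,    // regularize_function: none
        1,          // batch_size = 1 -> online learning
        0.5);       // learn_rate
    if (!net.isValid())
        return 1;

    // Illustrative XOR training set.
    const std::vector<std::vector<double>> inputs{ { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 } };
    const std::vector<std::vector<double>> targets{ { 0 }, { 1 }, { 1 }, { 0 } };

    // Simulate() performs the forward step, the backward step, and the
    // weight update in one call.
    net.setLearning(true);
    for (int epoch = 0; epoch < 10000; epoch++)
        for (size_t i = 0; i < inputs.size(); i++)
            net.Simulate(inputs[i], targets[i]);

    // Inference: run the forward step only and read the output neuron.
    net.setLearning(false);
    net.Forward(inputs[1]);
    double prediction = net.getOutput(0); // should approach 1 for { 0, 1 }

    // Loss() evaluates the error function for one sample.
    double error = net.Loss(inputs[1], targets[1]);
    (void)prediction; (void)error;
    return 0;
}

When finer control is needed, Forward, Backward, and UpdateWeights can be called individually instead of Simulate, for example to inspect neuron outputs or accumulated derivatives between the steps.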