1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
use failure::Error;
use ndarray::Array2;
use ndarray_stats::QuantileExt;
use num::Float;
use std::fmt;

pub mod activate_functions;

/// A feed-forward neural network state: the current neuron activations as a
/// 2-D matrix. One row per sample, so a height greater than 1 means batch
/// processing (see [`NeuralNetwork::new`]).
// NOTE(review): the derived `Default` produces an empty (0x0) matrix, which is
// exactly what `new` rejects — confirm whether a `Default` instance is ever
// actually used before relying on it.
#[derive(Default)]
pub struct NeuralNetwork<T> {
    // Current activation matrix; updated in place by `next`/`safe_next`.
    neurons: Array2<T>,
}

impl<T: Float + fmt::Display> fmt::Display for NeuralNetwork<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.neurons)
    }
}

impl<T: Float + 'static> NeuralNetwork<T> {
    /// `new` is the constructor of `NeuralNetwork`.
    /// If the height of a given matrix is not 1, it means batch processing.
    ///
    /// # Arguments
    ///
    /// * `init_neurons` - The initial matrix \\(\mathbb{R}^{n\times m}\\).
    ///
    /// # Errors
    ///
    /// Returns `Err` when `init_neurons` contains no elements.
    pub fn new(init_neurons: Array2<T>) -> Result<Self, Error> {
        if init_neurons.is_empty() {
            return Err(failure::format_err!("the matrix is empty"));
        }

        Ok(NeuralNetwork::<T> {
            neurons: init_neurons,
        })
    }

    /// Let a current matrix \\(X^{n_X\times m_X}\\),
    /// given arguments \\(W^{n_W\times m_W}\\) (weight) and \\(B^{n_B\times m_B}\\) (bias)
    /// where \\(m_X=n_W\\), \\(m_W=m_B\\), and \\(n_B=1\\) or \\(n_B=n_X\\)
    /// (the bias is broadcast across the batch rows).
    /// Thus, `safe_next` computes next neurons \\(h(X W+B)\\).
    /// If any of those dimension conditions is violated, it returns `Err`
    /// instead of letting the underlying matrix operations panic.
    ///
    /// # Arguments
    ///
    /// * `weight` - Weight matrix \\(W^{n_W\times m_W}\\) for computing next neuron.
    /// * `bias` - Bias matrix \\(B^{n_B\times m_B}\\) for computing next neuron.
    /// * `activate_function` - The activate function \\(h\\).
    ///
    /// # Errors
    ///
    /// Returns `Err` when the dimensions of `weight` or `bias` do not match
    /// the current neuron matrix as described above.
    #[inline]
    pub fn safe_next(
        &mut self,
        weight: &Array2<T>,
        bias: &Array2<T>,
        // `&dyn Fn` instead of `&Box<dyn Fn>` (clippy `borrowed_box`):
        // existing call sites passing `&boxed` still compile via deref coercion.
        activate_function: &dyn Fn(Array2<T>) -> Array2<T>,
    ) -> Result<(), Error> {
        match (self.neurons.dim(), weight.dim(), bias.dim()) {
            ((rows, width1), (height, width2), (bias_rows, width3))
                if width1 == height
                    && width2 == width3
                    // Bias must broadcast over the batch: a single row, or one
                    // row per sample. Previously unchecked, so an invalid bias
                    // height panicked inside `next` instead of returning `Err`.
                    && (bias_rows == 1 || bias_rows == rows) =>
            {
                Ok(self.next(weight, bias, activate_function))
            }
            _ => Err(failure::format_err!("Invalid argument")),
        }
    }

    /// Compute \\(h(X\cdot W+B)\\) where \\(X^{n_X\times m_X}\\) is a neurons matrix,
    /// \\(W^{n_W\times m_W}\\) is a weights matrix,
    /// \\(B^{n_B\times m_B}\\) is a bias matrix.
    /// These arguments must follow \\(m_X=n_W\\), \\(m_W=m_B\\); otherwise the
    /// underlying `dot`/`+` panics — use [`NeuralNetwork::safe_next`] to get an
    /// `Err` instead.
    ///
    /// # Arguments
    ///
    /// * `weight` - Weight matrix \\(W^{n_W\times m_W}\\) for computing next neuron.
    /// * `bias` - Bias matrix \\(B^{n_B\times m_B}\\) for computing next neuron.
    /// * `activate_function` - The activate function \\(h\\).
    #[inline]
    pub fn next(
        &mut self,
        weight: &Array2<T>,
        bias: &Array2<T>,
        activate_function: &dyn Fn(Array2<T>) -> Array2<T>,
    ) {
        self.neurons = activate_function(self.neurons.dot(weight) + bias)
    }

    /// `dim` returns the shape of the array as `(rows, columns)`.
    #[inline]
    pub fn dim(&self) -> (ndarray::Ix, ndarray::Ix) {
        self.neurons.dim()
    }

    /// `argmax` returns the index of the maximum value of each row
    /// (one entry per sample in the batch).
    ///
    /// # Panics
    ///
    /// Panics if a row is empty or contains values that cannot be compared
    /// (e.g. NaN), since `ndarray_stats::argmax` returns `Err` in those cases.
    #[inline]
    pub fn argmax(&self) -> Vec<usize> {
        self.neurons
            .outer_iter()
            .map(|x| x.argmax().expect("row must be non-empty and NaN-free"))
            .collect::<Vec<usize>>()
    }
}