/usr/include/caffe/layers/elu_layer.hpp is in libcaffe-cpu-dev 1.0.0~rc4-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
#ifndef CAFFE_ELU_LAYER_HPP_
#define CAFFE_ELU_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/neuron_layer.hpp"

namespace caffe {
/**
 * @brief Exponential Linear Unit non-linearity @f$
 *        y = \left\{
 *        \begin{array}{lr}
 *            x                  & \mathrm{if} \; x > 0 \\
 *            \alpha (\exp(x)-1) & \mathrm{if} \; x \le 0
 *        \end{array} \right.
 *        @f$.
 */
template <typename Dtype>
class ELULayer : public NeuronLayer<Dtype> {
 public:
  /**
   * @param param provides ELUParameter elu_param,
   *     with ELULayer options:
   *   - alpha (\b optional, default 1).
   *     The value @f$ \alpha @f$ that controls saturation for negative inputs.
   */
  explicit ELULayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}

  virtual inline const char* type() const { return "ELU"; }

 protected:
  /**
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x @f$
   * @param top output Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the computed outputs @f$
   *        y = \left\{
   *        \begin{array}{lr}
   *            x                  & \mathrm{if} \; x > 0 \\
   *            \alpha (\exp(x)-1) & \mathrm{if} \; x \le 0
   *        \end{array} \right.
   *      @f$.
   */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /**
   * @brief Computes the error gradient w.r.t. the ELU inputs.
   *
   * @param top output Blob vector (length 1), providing the error gradient with
   *      respect to the outputs
   *   -# @f$ (N \times C \times H \times W) @f$
   *      containing error gradients @f$ \frac{\partial E}{\partial y} @f$
   *      with respect to computed outputs @f$ y @f$
   * @param propagate_down see Layer::Backward.
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x @f$; Backward fills their diff with
   *      gradients @f$
   *        \frac{\partial E}{\partial x} = \left\{
   *        \begin{array}{lr}
   *            \frac{\partial E}{\partial y}              & \mathrm{if} \; x > 0 \\
   *            \frac{\partial E}{\partial y} (y + \alpha) & \mathrm{if} \; x \le 0
   *        \end{array} \right.
   *      @f$ if propagate_down[0].
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};

}  // namespace caffe

#endif  // CAFFE_ELU_LAYER_HPP_
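
For reference, constructing this layer directly is straightforward. Below is a minimal sketch, assuming the libcaffe-cpu-dev headers and the generated protobuf code are on the include path; the alpha value of 0.5 is arbitrary, chosen only to show a non-default setting:

#include "caffe/layers/elu_layer.hpp"
#include "caffe/proto/caffe.pb.h"

int main() {
  // Build the LayerParameter that the explicit constructor above expects.
  caffe::LayerParameter param;
  param.set_type("ELU");
  param.mutable_elu_param()->set_alpha(0.5f);  // optional; defaults to 1

  // Instantiate the layer for single-precision blobs.
  caffe::ELULayer<float> layer(param);
  return 0;
}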
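
The forward and backward formulas documented in the header reduce to element-wise loops. The following standalone sketch is illustrative only: the real implementations live in the library's elu_layer.cpp, and the function and argument names here are made up for the example:

#include <cmath>
#include <cstddef>

// Forward: y = x for x > 0, alpha * (exp(x) - 1) otherwise.
void elu_forward(const float* x, float* y, std::size_t n, float alpha) {
  for (std::size_t i = 0; i < n; ++i) {
    y[i] = x[i] > 0 ? x[i] : alpha * (std::exp(x[i]) - 1.0f);
  }
}

// Backward: dE/dx = dE/dy for x > 0, and dE/dy * (y + alpha) otherwise,
// using dy/dx = alpha * exp(x) = y + alpha when x <= 0.
void elu_backward(const float* x, const float* y, const float* dy,
                  float* dx, std::size_t n, float alpha) {
  for (std::size_t i = 0; i < n; ++i) {
    dx[i] = x[i] > 0 ? dy[i] : dy[i] * (y[i] + alpha);
  }
}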