/usr/include/caffe/layers/dropout_layer.hpp is in libcaffe-cpu-dev 1.0.0~rc4-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
#ifndef CAFFE_DROPOUT_LAYER_HPP_
#define CAFFE_DROPOUT_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/neuron_layer.hpp"

namespace caffe {
/**
 * @brief During training only, sets a random portion of @f$x@f$ to 0, adjusting
 *        the rest of the vector magnitude accordingly.
 *
 * @param bottom input Blob vector (length 1)
 *   -# @f$ (N \times C \times H \times W) @f$
 *      the inputs @f$ x @f$
 * @param top output Blob vector (length 1)
 *   -# @f$ (N \times C \times H \times W) @f$
 *      the computed outputs @f$ y @f$: at training time,
 *      @f$ y = \frac{x}{1 - p} @f$ for retained units and @f$ 0 @f$ for
 *      dropped units; at test time, @f$ y = x @f$.
 */
template <typename Dtype>
class DropoutLayer : public NeuronLayer<Dtype> {
 public:
  /**
   * @param param provides DropoutParameter dropout_param,
   *     with DropoutLayer options:
   *   - dropout_ratio (\b optional, default 0.5).
   *     Sets the probability @f$ p @f$ that any given unit is dropped.
   */
  explicit DropoutLayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Dropout"; }

 protected:
  /**
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x @f$
   * @param top output Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the computed outputs. At training time, we have
   *      @f$
   *      y_{\mbox{train}} = \left\{
   *      \begin{array}{ll}
   *        \frac{x}{1 - p} & \mbox{if } u > p \\
   *        0 & \mbox{otherwise}
   *      \end{array} \right.
   *      @f$, where @f$ u \sim U(0, 1) @f$ is generated independently for each
   *      input at each iteration. At test time, we simply have
   *      @f$ y_{\mbox{test}} = \mathbb{E}[y_{\mbox{train}}] = x @f$.
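   *      For example, with @f$ p = 0.5 @f$, a retained input @f$ x = 3 @f$ is
   *      scaled to @f$ 3 / (1 - 0.5) = 6 @f$; since each unit is retained with
   *      probability @f$ 1 - p @f$, the expectation is
   *      @f$ (1 - p) \cdot \frac{x}{1 - p} = x @f$, which is why no extra
   *      scaling is needed at test time.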
   */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
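  /**
   * @brief Computes the error gradient w.r.t. the dropped-out inputs: at
   *        training time the same mask and scale are applied to the top diff,
   *        so @f$ \frac{\partial E}{\partial x_i} = \frac{1}{1 - p}
   *        \frac{\partial E}{\partial y_i} @f$ for retained units and
   *        @f$ 0 @f$ for dropped units; at test time the diff passes through
   *        unchanged.
   */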
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  /// when divided by UINT_MAX, the randomly generated values @f$u\sim U(0,1)@f$
  Blob<unsigned int> rand_vec_;
  /// the probability @f$ p @f$ of dropping any input
  Dtype threshold_;
  /// the scale for undropped inputs at train time @f$ 1 / (1 - p) @f$
  Dtype scale_;
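  /// threshold_ mapped onto the raw random-integer range, i.e.
  /// @f$ p \cdot @f$ UINT_MAX, so rand_vec_ entries can be compared to it
  /// without a division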
  unsigned int uint_thres_;
};
} // namespace caffe
#endif // CAFFE_DROPOUT_LAYER_HPP_
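
For reference, a minimal sketch of how this layer might be driven directly from C++, assuming a standard Caffe 1.0 build; the blob shape and the bottom_vec/top_vec names are illustrative, not part of the header above:

#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layers/dropout_layer.hpp"

int main() {
  using namespace caffe;
  Caffe::set_mode(Caffe::CPU);

  // Dropout is an identity mapping outside the TRAIN phase,
  // so request that phase explicitly.
  LayerParameter param;
  param.set_phase(TRAIN);
  param.mutable_dropout_param()->set_dropout_ratio(0.5);  // p = 0.5

  Blob<float> bottom(2, 3, 4, 5);  // N x C x H x W
  Blob<float> top;
  for (int i = 0; i < bottom.count(); ++i) {
    bottom.mutable_cpu_data()[i] = 1.0f;  // fill with a constant test value
  }
  std::vector<Blob<float>*> bottom_vec(1, &bottom), top_vec(1, &top);

  DropoutLayer<float> layer(param);
  layer.SetUp(bottom_vec, top_vec);    // runs LayerSetUp + Reshape
  layer.Forward(bottom_vec, top_vec);  // each output is 0 or 1/(1-p) = 2
  return 0;
}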