/usr/include/caffe/layers/python_layer.hpp is in libcaffe-cpu-dev 1.0.0~rc4-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file are shown below.
#ifndef CAFFE_PYTHON_LAYER_HPP_
#define CAFFE_PYTHON_LAYER_HPP_

#include <boost/python.hpp>
#include <vector>

#include "caffe/layer.hpp"

namespace bp = boost::python;

namespace caffe {

template <typename Dtype>
class PythonLayer : public Layer<Dtype> {
 public:
  PythonLayer(PyObject* self, const LayerParameter& param)
      : Layer<Dtype>(param), self_(bp::handle<>(bp::borrowed(self))) { }

  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    // Disallow PythonLayer in MultiGPU training stage, due to GIL issues
    // Details: https://github.com/BVLC/caffe/issues/2936
    if (this->phase_ == TRAIN && Caffe::solver_count() > 1
        && !Caffe::multiprocess()) {
      LOG(FATAL) << "PythonLayer does not support CLI Multi-GPU, use train.py";
    }
    self_.attr("param_str") = bp::str(
        this->layer_param_.python_param().param_str());
    self_.attr("phase") = static_cast<int>(this->phase_);
    self_.attr("setup")(bottom, top);
  }
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    self_.attr("reshape")(bottom, top);
  }

  virtual inline bool ShareInParallel() const {
    return this->layer_param_.python_param().share_in_parallel();
  }

  virtual inline const char* type() const { return "Python"; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    self_.attr("forward")(bottom, top);
  }
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
    self_.attr("backward")(top, propagate_down, bottom);
  }

 private:
  bp::object self_;
};

}  // namespace caffe

#endif
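The header contains no numerical code of its own: each C++ entry point looks up a correspondingly named attribute on the wrapped Python object and calls it, so a user's layer must provide setup, reshape, forward, and backward methods, and may read the param_str and phase attributes that LayerSetUp assigns before invoking setup. The sketch below illustrates one such layer using the usual pycaffe interface (caffe.Layer, blob .data/.diff arrays); the class name ScaleByConstantLayer and its JSON-encoded scale parameter are illustrative assumptions, not anything shipped in this package.

import json

import caffe  # pycaffe; caffe.Layer is the expected base class for Python layers


class ScaleByConstantLayer(caffe.Layer):
    """Hypothetical layer that multiplies its single input blob by a constant."""

    def setup(self, bottom, top):
        # Called from PythonLayer::LayerSetUp; self.param_str and self.phase
        # have already been set by the C++ side at this point.
        params = json.loads(self.param_str) if self.param_str else {}
        self.scale = params.get('scale', 1.0)
        if len(bottom) != 1:
            raise Exception('ScaleByConstantLayer expects exactly one bottom blob')

    def reshape(self, bottom, top):
        # Called from PythonLayer::Reshape; the output mirrors the input shape.
        top[0].reshape(*bottom[0].data.shape)

    def forward(self, bottom, top):
        # Called from PythonLayer::Forward_cpu.
        top[0].data[...] = self.scale * bottom[0].data

    def backward(self, top, propagate_down, bottom):
        # Called from PythonLayer::Backward_cpu; the argument order matches
        # the C++ signature (top, propagate_down, bottom).
        if propagate_down[0]:
            bottom[0].diff[...] = self.scale * top[0].diff

In a network definition, such a class is referenced by a layer whose type is "Python" (matching the type() string above), with python_param giving the module and layer names and, optionally, the param_str that LayerSetUp forwards to the object. As the LOG(FATAL) guard indicates, this only works in single-GPU or multiprocess training; the multi-GPU path of the caffe command-line tool rejects Python layers because of the GIL issue referenced in the comment.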