#ifndef CAFFE_LOG_LAYER_HPP_
#define CAFFE_LOG_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/neuron_layer.hpp"

namespace caffe {
- /**
- * @brief Computes @f$ y = log_{\gamma}(\alpha x + \beta) @f$,
- * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$,
- * and base @f$ \gamma @f$.
- */
- template <typename Dtype>
- class LogLayer : public NeuronLayer<Dtype> {
- public:
- /**
- * @param param provides LogParameter log_param,
- * with LogLayer options:
- * - scale (\b optional, default 1) the scale @f$ \alpha @f$
- * - shift (\b optional, default 0) the shift @f$ \beta @f$
- * - base (\b optional, default -1 for a value of @f$ e \approx 2.718 @f$)
- * the base @f$ \gamma @f$
- */
- explicit LogLayer(const LayerParameter& param)
- : NeuronLayer<Dtype>(param) {}
- virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- const vector<Blob<Dtype>*>& top);
- virtual inline const char* type() const { return "Log"; }
- protected:
- /**
- * @param bottom input Blob vector (length 1)
- * -# @f$ (N \times C \times H \times W) @f$
- * the inputs @f$ x @f$
- * @param top output Blob vector (length 1)
- * -# @f$ (N \times C \times H \times W) @f$
- * the computed outputs @f$
- * y = log_{\gamma}(\alpha x + \beta)
- * @f$
- */
- virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- const vector<Blob<Dtype>*>& top);
- virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- const vector<Blob<Dtype>*>& top);
- /**
- * @brief Computes the error gradient w.r.t. the exp inputs.
- *
- * @param top output Blob vector (length 1), providing the error gradient with
- * respect to the outputs
- * -# @f$ (N \times C \times H \times W) @f$
- * containing error gradients @f$ \frac{\partial E}{\partial y} @f$
- * with respect to computed outputs @f$ y @f$
- * @param propagate_down see Layer::Backward.
- * @param bottom input Blob vector (length 1)
- * -# @f$ (N \times C \times H \times W) @f$
- * the inputs @f$ x @f$; Backward fills their diff with
- * gradients @f$
- * \frac{\partial E}{\partial x} =
- * \frac{\partial E}{\partial y} y \alpha \log_e(gamma)
- * @f$ if propagate_down[0]
- */
- virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
- virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
- Dtype base_scale_;
- Dtype input_scale_, input_shift_;
- Dtype backward_num_scale_;
- };
}  // namespace caffe

#endif  // CAFFE_LOG_LAYER_HPP_