log_layer.hpp

#ifndef CAFFE_LOG_LAYER_HPP_
#define CAFFE_LOG_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/neuron_layer.hpp"

namespace caffe {

/**
 * @brief Computes @f$ y = \log_{\gamma}(\alpha x + \beta) @f$,
 *        as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$,
 *        and base @f$ \gamma @f$.
 */
template <typename Dtype>
class LogLayer : public NeuronLayer<Dtype> {
 public:
  /**
   * @param param provides LogParameter log_param,
   *     with LogLayer options:
   *   - scale (\b optional, default 1) the scale @f$ \alpha @f$
   *   - shift (\b optional, default 0) the shift @f$ \beta @f$
   *   - base (\b optional, default -1 for a value of @f$ e \approx 2.718 @f$)
   *     the base @f$ \gamma @f$
   */
  explicit LogLayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}

  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Log"; }

 protected:
  /**
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x @f$
   * @param top output Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the computed outputs @f$
   *        y = \log_{\gamma}(\alpha x + \beta)
   *      @f$
   */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /**
   * @brief Computes the error gradient w.r.t. the log inputs.
   *
   * @param top output Blob vector (length 1), providing the error gradient
   *      with respect to the outputs
   *   -# @f$ (N \times C \times H \times W) @f$
   *      containing error gradients @f$ \frac{\partial E}{\partial y} @f$
   *      with respect to computed outputs @f$ y @f$
   * @param propagate_down see Layer::Backward.
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x @f$; Backward fills their diff with
   *      gradients @f$
   *        \frac{\partial E}{\partial x} =
   *            \frac{\partial E}{\partial y}
   *            \frac{\alpha}{(\alpha x + \beta) \log_e(\gamma)}
   *      @f$ if propagate_down[0]
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  /// 1 / ln(gamma): rescales the natural log to base gamma in Forward.
  Dtype base_scale_;
  /// the scale alpha and shift beta applied to the input
  Dtype input_scale_, input_shift_;
  /// alpha / ln(gamma): numerator scale of the gradient in Backward.
  Dtype backward_num_scale_;
};

}  // namespace caffe

#endif  // CAFFE_LOG_LAYER_HPP_
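
Not part of the header: a minimal standalone sketch of the elementwise forward math the docblock above describes, using a plain std::vector in place of Caffe Blobs. The helper name log_forward and the float element type are illustrative assumptions, not Caffe API.

#include <cmath>
#include <cstdio>
#include <vector>

// Elementwise forward pass: y = log_gamma(alpha * x + beta),
// computed as ln(alpha * x + beta) / ln(gamma). With the default
// base of -1 the natural log is used, i.e. ln(gamma) = 1.
// (log_forward is a hypothetical helper, not part of Caffe.)
std::vector<float> log_forward(const std::vector<float>& x,
                               float alpha, float beta, float gamma) {
  const float log_base = (gamma == -1.0f) ? 1.0f : std::log(gamma);
  std::vector<float> y(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    y[i] = std::log(alpha * x[i] + beta) / log_base;
  }
  return y;
}

int main() {
  const std::vector<float> x = {1.0f, 2.0f, 4.0f};
  // base 2, scale 1, shift 0: the outputs should be {0, 1, 2}.
  const std::vector<float> y = log_forward(x, 1.0f, 0.0f, 2.0f);
  for (float v : y) std::printf("%g\n", v);
  return 0;
}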
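
Likewise, a quick sanity check of the backward formula given above: the analytic gradient alpha / ((alpha * x + beta) * ln(gamma)) compared against a central finite difference. log_backward is again a hypothetical helper for illustration only.

#include <cmath>
#include <cstdio>

// Analytic gradient of y = log_gamma(alpha * x + beta):
//   dy/dx = alpha / ((alpha * x + beta) * ln(gamma)),
// so bottom_diff = top_diff * alpha / ((alpha * x + beta) * ln(gamma)).
// (log_backward is a hypothetical helper, not part of Caffe.)
float log_backward(float top_diff, float x,
                   float alpha, float beta, float gamma) {
  const float log_base = (gamma == -1.0f) ? 1.0f : std::log(gamma);
  return top_diff * alpha / ((alpha * x + beta) * log_base);
}

int main() {
  const float alpha = 2.0f, beta = 1.0f, gamma = 10.0f, x = 3.0f;
  // Central finite-difference check of the analytic gradient.
  const float eps = 1e-3f;
  const float log_base = std::log(gamma);
  const float y_plus = std::log(alpha * (x + eps) + beta) / log_base;
  const float y_minus = std::log(alpha * (x - eps) + beta) / log_base;
  const float numeric = (y_plus - y_minus) / (2.0f * eps);
  const float analytic = log_backward(1.0f, x, alpha, beta, gamma);
  std::printf("numeric %g vs analytic %g\n", numeric, analytic);
  return 0;
}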