/usr/include/shogun/loss/SquaredLoss.h is in libshogun-dev 3.2.0-7.3build4.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
/*
Copyright (c) 2009 Yahoo! Inc. All rights reserved. The copyrights
embodied in the content of this file are licensed under the BSD
(revised) open source license.
Copyright (c) 2011 Berlin Institute of Technology and Max-Planck-Society.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Modifications (w) 2011 Shashwat Lal Das
Modifications (w) 2012 Fernando José Iglesias García
*/
#ifndef _SQUAREDLOSS_H__
#define _SQUAREDLOSS_H__
#include <shogun/loss/LossFunction.h>
namespace shogun
{

/** @brief CSquaredLoss implements the
 * squared loss function.
 */
class CSquaredLoss: public CLossFunction
{
public:
    /**
     * Constructor
     */
    CSquaredLoss(): CLossFunction() {};

    /**
     * Destructor
     */
    ~CSquaredLoss() {};

    /**
     * Get loss for an example
     *
     * @param prediction prediction
     * @param label label
     *
     * @return loss
     */
    float64_t loss(float64_t prediction, float64_t label);

    /**
     * Get loss for an example
     *
     * @param z where to evaluate the loss
     *
     * @return loss
     */
    float64_t loss(float64_t z);
    /**
     * Get first derivative of the loss function
     *
     * @param prediction prediction
     * @param label label
     *
     * @return first derivative
     */
    float64_t first_derivative(float64_t prediction, float64_t label);

    /**
     * Get first derivative of the loss function
     *
     * @param z where to evaluate the derivative of the loss
     *
     * @return first derivative
     */
    float64_t first_derivative(float64_t z);

    /**
     * Get second derivative of the loss function
     *
     * @param prediction prediction
     * @param label label
     *
     * @return second derivative
     */
    float64_t second_derivative(float64_t prediction, float64_t label);

    /**
     * Get second derivative of the loss function
     *
     * @param z where to evaluate the second derivative of the loss
     *
     * @return second derivative
     */
    float64_t second_derivative(float64_t z);

    /**
     * Get importance aware weight update for this loss function
     *
     * @param prediction prediction
     * @param label label
     * @param eta_t learning rate at update number t
     * @param norm scale value
     *
     * @return update
     */
    virtual float64_t get_update(float64_t prediction, float64_t label, float64_t eta_t, float64_t norm);

    /**
     * Get square of gradient, used for adaptive learning
     *
     * @param prediction prediction
     * @param label label
     *
     * @return square of gradient
     */
    virtual float64_t get_square_grad(float64_t prediction, float64_t label);

    /**
     * Return loss type
     *
     * @return L_SQUAREDLOSS
     */
    virtual ELossType get_loss_type() { return L_SQUAREDLOSS; }

    virtual const char* get_name() const { return "SquaredLoss"; }
};

}
#endif
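For illustration, a minimal usage sketch of this class follows. It is not part of the packaged file: it assumes the conventional squared loss L(p, y) = (p - y)^2, whose first derivative with respect to the prediction is 2(p - y) and whose second derivative is the constant 2, and it assumes the Shogun 3.x entry points init_shogun_with_defaults() and exit_shogun() from <shogun/base/init.h>. The example values (1.5, 1.0) are arbitrary, and the numbers quoted in the comments follow from the assumed definition rather than from this header.

#include <shogun/base/init.h>
#include <shogun/loss/SquaredLoss.h>
#include <cstdio>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // Hypothetical example: evaluate the loss and its derivatives
    // for a prediction of 1.5 against a true label of 1.0.
    CSquaredLoss* sq_loss = new CSquaredLoss();
    SG_REF(sq_loss);

    float64_t prediction = 1.5;
    float64_t label = 1.0;

    // Assuming L(p, y) = (p - y)^2, these print 0.25, 1.0 and 2.0.
    printf("loss              = %f\n", sq_loss->loss(prediction, label));
    printf("first derivative  = %f\n", sq_loss->first_derivative(prediction, label));
    printf("second derivative = %f\n", sq_loss->second_derivative(prediction, label));

    SG_UNREF(sq_loss);
    exit_shogun();
    return 0;
}

Such a program would be built against the headers from this package and linked with the Shogun library, e.g. with something like g++ example.cpp -lshogun.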