/usr/include/shogun/classifier/vw/VowpalWabbit.h is in libshogun-dev 3.2.0-7.3build4.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
/*
* Copyright (c) 2009 Yahoo! Inc. All rights reserved. The copyrights
* embodied in the content of this file are licensed under the BSD
* (revised) open source license.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* Written (W) 2011 Shashwat Lal Das
* Adaptation of Vowpal Wabbit v5.1.
* Copyright (C) 2011 Berlin Institute of Technology and Max-Planck-Society.
*/
#ifndef _VOWPALWABBIT_H__
#define _VOWPALWABBIT_H__
#include <shogun/classifier/vw/vw_common.h>
#include <shogun/classifier/vw/learners/VwAdaptiveLearner.h>
#include <shogun/classifier/vw/learners/VwNonAdaptiveLearner.h>
#include <shogun/classifier/vw/VwRegressor.h>
#include <shogun/features/streaming/StreamingVwFeatures.h>
#include <shogun/machine/OnlineLinearMachine.h>
namespace shogun
{
/** @brief Class CVowpalWabbit is the implementation of the
* online learning algorithm used in Vowpal Wabbit.
*
* VW is a fast online learning algorithm which operates on
* sparse features. It uses an online gradient descent technique.
*
* For more details, refer to the tutorial at
* https://github.com/JohnLangford/vowpal_wabbit/wiki/v5.1_tutorial.pdf
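*
* A minimal usage sketch is given below. It assumes the companion classes
* CStreamingVwFile and CStreamingVwFeatures (from the streaming I/O and
* feature headers) and a VW-format input file, here called "train.vw" as a
* placeholder; check those classes' constructors before relying on the
* exact signatures shown.
*
* @code
* #include <shogun/base/init.h>
* #include <shogun/io/streaming/StreamingVwFile.h>
* #include <shogun/features/streaming/StreamingVwFeatures.h>
* #include <shogun/classifier/vw/VowpalWabbit.h>
*
* using namespace shogun;
*
* int main()
* {
*     init_shogun_with_defaults();
*
*     // Stream labelled examples from a VW-format text file (placeholder name)
*     char train_fname[] = "train.vw";
*     CStreamingVwFile* source = new CStreamingVwFile(train_fname);
*     CStreamingVwFeatures* features = new CStreamingVwFeatures(source, true, 1024);
*
*     // Run online gradient descent over the streamed examples
*     CVowpalWabbit* vw = new CVowpalWabbit(features);
*     vw->train();
*
*     SG_UNREF(vw);
*     exit_shogun();
*     return 0;
* }
* @endcode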
*/
class CVowpalWabbit: public COnlineLinearMachine
{
public:
/** problem type */
MACHINE_PROBLEM_TYPE(PT_BINARY);
/**
* Default constructor
*/
CVowpalWabbit();
/**
* Constructor, taking a features object
* as argument
*
* @param feat StreamingVwFeatures object
*/
CVowpalWabbit(CStreamingVwFeatures* feat);
/** copy constructor
* @param vw another VowpalWabbit object
*/
CVowpalWabbit(CVowpalWabbit *vw);
/**
* Destructor
*/
~CVowpalWabbit();
/**
* Reinitialize the weight vectors.
* Call after updating variables in the VW environment (env), e.g. the stride.
*/
void reinitialize_weights();
/**
* Set whether to skip training and only
* make passes over all the examples instead.
*
* This is useful if one wants to create a cache file from data.
*
* @param dont_train true if one doesn't want to train
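*
* A sketch of the cache-creation pattern follows; it assumes
* CStreamingVwFile provides set_write_to_cache(), so verify that against
* the streaming I/O headers before use.
*
* @code
* char train_fname[] = "train.vw";                    // placeholder name
* CStreamingVwFile* source = new CStreamingVwFile(train_fname);
* source->set_write_to_cache(true);                   // also write a cache file while parsing
* CStreamingVwFeatures* features = new CStreamingVwFeatures(source, true, 1024);
*
* CVowpalWabbit* vw = new CVowpalWabbit(features);
* vw->set_no_training(true);  // stream the examples without updating weights
* vw->train();                // a single pass over the data builds the cache
* @endcode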
*/
void set_no_training(bool dont_train) { no_training = dont_train; }
/**
* Set whether learning is adaptive or not
*
* @param adaptive_learning true if adaptive
*/
void set_adaptive(bool adaptive_learning);
/**
* Set whether to use the more expensive
* exact norm for adaptive learning
*
* @param exact_adaptive true if exact norm is required
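*
* A short sketch combining this with set_adaptive(); vw is a CVowpalWabbit
* instance constructed as in the class-level example:
*
* @code
* vw->set_adaptive(true);            // per-feature adaptive learning rates
* vw->set_exact_adaptive_norm(true); // pay extra per-example cost for the exact norm
* vw->train();
* @endcode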
*/
void set_exact_adaptive_norm(bool exact_adaptive);
/**
* Set number of passes (only works for cached input)
*
* @param passes number of passes
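*
* Multiple passes require cached input; a sketch assuming the
* CStreamingVwCacheFile class from the streaming I/O headers (its exact
* constructor should be checked) and a previously written cache file:
*
* @code
* char cache_fname[] = "train.vw.cache";              // placeholder name
* CStreamingVwCacheFile* cache = new CStreamingVwCacheFile(cache_fname);
* CStreamingVwFeatures* features = new CStreamingVwFeatures(cache, true, 1024);
*
* CVowpalWabbit* vw = new CVowpalWabbit(features);
* vw->set_num_passes(5);  // iterate over the cached examples five times
* vw->train();
* @endcode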
*/
void set_num_passes(int32_t passes)
{
env->num_passes = passes;
}
/**
* Load regressor from a dump file
*
* @param file_name name of regressor file
*/
void load_regressor(char* file_name);
/**
* Set regressor output parameters
*
* @param file_name name of file to save regressor to
* @param is_text whether to save the regressor in human-readable text form
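*
* A save/load sketch using only methods declared in this header; the file
* name is a placeholder and vw/features are set up as in the class-level
* example:
*
* @code
* char reg_fname[] = "vw_regressor.dat";
* vw->set_regressor_out(reg_fname, true);  // dump the weights as readable text after training
* vw->train();
*
* // Later: restore the weights into a fresh machine
* CVowpalWabbit* vw2 = new CVowpalWabbit(features);
* vw2->load_regressor(reg_fname);
* @endcode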
*/
void set_regressor_out(char* file_name, bool is_text = true);
/**
* Set file name of prediction output
*
* @param file_name name of file to save predictions to
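*
* Sketch (placeholder file name); predictions made while processing
* examples are written to the given file:
*
* @code
* char pred_fname[] = "predictions.txt";
* vw->set_prediction_out(pred_fname);
* vw->train();
* @endcode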
*/
void set_prediction_out(char* file_name);
/**
* Add a pair of namespaces whose features should
* be crossed for quadratic updates
*
* @param pair a string with the two namespace names concatenated
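*
* For example, to cross the features of namespaces 'a' and 'b' (placeholder
* names) for quadratic updates:
*
* @code
* char pair[] = "ab";           // first character: namespace 'a', second: namespace 'b'
* vw->add_quadratic_pair(pair);
* vw->train();
* @endcode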
*/
void add_quadratic_pair(char* pair);
/**
* Train on a StreamingVwFeatures object
*
* @param feat StreamingVwFeatures to train using
*/
virtual bool train_machine(CFeatures* feat = NULL);
/**
* Predict for an example
*
* @param ex VwExample to predict for
*
* @return prediction
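*
* A sketch of scoring streamed examples by hand; it assumes the usual
* Shogun streaming-parser loop (start_parser/get_next_example/get_example/
* release_example/end_parser) on CStreamingVwFeatures:
*
* @code
* features->start_parser();
* while (features->get_next_example())
* {
*     VwExample* ex = features->get_example();
*     SG_SPRINT("prediction = %f\n", vw->predict_and_finalize(ex));
*     features->release_example();
* }
* features->end_parser();
* @endcode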
*/
virtual float32_t predict_and_finalize(VwExample* ex);
/**
* Computes the exact norm during adaptive learning
*
* @param ex example
* @param sum_abs_x set by reference to the sum of absolute values of the features
*
* @return norm
*/
float32_t compute_exact_norm(VwExample* &ex, float32_t& sum_abs_x);
/**
* Computes the exact norm for quadratic features during adaptive learning
*
* @param weights weights
* @param page_feature current feature
* @param offer_features paired features
* @param mask mask
* @param g square of gradient
* @param sum_abs_x sum of absolute value of features
*
* @return norm
*/
float32_t compute_exact_norm_quad(float32_t* weights, VwFeature& page_feature, v_array<VwFeature> &offer_features,
vw_size_t mask, float32_t g, float32_t& sum_abs_x);
/**
* Get the environment
*
* @return environment as CVwEnvironment*
*/
virtual CVwEnvironment* get_env()
{
SG_REF(env);
return env;
}
/**
* Return the name of the object
*
* @return VowpalWabbit
*/
virtual const char* get_name() const { return "VowpalWabbit"; }
/**
* Sets the train/update methods depending on the parameters
* that have been set, e.g. adaptive or not
*/
virtual void set_learner();
/**
* Get learner
*/
CVwLearner* get_learner() { return learner; }
private:
/**
* Initialize members
*
* @param feat Features object
*/
virtual void init(CStreamingVwFeatures* feat = NULL);
/**
* Predict with l1 regularization
*
* @param ex example
*
* @return prediction
*/
virtual float32_t inline_l1_predict(VwExample* &ex);
/**
* Predict with no regularization term
*
* @param ex example
*
* @return prediction
*/
virtual float32_t inline_predict(VwExample* &ex);
/**
* Reduce the prediction within limits
*
* @param ret prediction
*
* @return prediction within limits
*/
virtual float32_t finalize_prediction(float32_t ret);
/**
* Output an example, i.e. write the prediction, print the update, etc.
*
* @param ex example
*/
virtual void output_example(VwExample* &ex);
/**
* Print statistics like VW
*
* @param ex example
*/
virtual void print_update(VwExample* &ex);
/**
* Output the prediction to a file
*
* @param f file descriptor
* @param res prediction
* @param weight weight of example
* @param tag tag
*/
virtual void output_prediction(int32_t f, float32_t res, float32_t weight, v_array<char> tag);
/**
* Set whether to display statistics or not
*
* @param verbose true or false
*/
void set_verbose(bool verbose);
protected:
/// Features
CStreamingVwFeatures* features;
/// Environment for VW, i.e., globals
CVwEnvironment* env;
/// Learner to use
CVwLearner* learner;
/// Regressor
CVwRegressor* reg;
private:
/// Whether to display statistics or not
bool quiet;
/// Whether we should just run over examples without training
bool no_training;
/// Multiplicative factor for the number of examples after which to dump statistics
float32_t dump_interval;
/// Sum of loss since last printed update
float32_t sum_loss_since_last_dump;
/// Number of weighted examples in previous dump
float64_t old_weighted_examples;
/// Name of file to save regressor to
char* reg_name;
/// Whether to save regressor as readable text or not
bool reg_dump_text;
/// Whether to save predictions or not
bool save_predictions;
/// Descriptor of prediction file
int32_t prediction_fd;
};
}
#endif // _VOWPALWABBIT_H__