This file is indexed.

/usr/include/viennacl/linalg/bicgstab.hpp is in libviennacl-dev 1.5.2-2.

This file is owned by root:root, with mode 0o644.
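
For orientation, the sketch below shows how the bicgstab_tag and the unpreconditioned solve() overload declared in this header are typically called. The small tridiagonal test system, the chosen solver parameters, and the output are illustrative assumptions, not part of the file.

#include <iostream>
#include <vector>
#include <map>

#include "viennacl/vector.hpp"
#include "viennacl/compressed_matrix.hpp"
#include "viennacl/linalg/bicgstab.hpp"

int main()
{
  std::size_t N = 5;   // illustrative system size

  // Assemble a small tridiagonal test system on the host:
  std::vector<std::map<unsigned int, double> > cpu_A(N);
  std::vector<double> cpu_b(N, 1.0);
  for (std::size_t i = 0; i < N; ++i)
  {
    cpu_A[i][static_cast<unsigned int>(i)] = 4.0;
    if (i > 0)     cpu_A[i][static_cast<unsigned int>(i - 1)] = -1.0;
    if (i + 1 < N) cpu_A[i][static_cast<unsigned int>(i + 1)] = -1.0;
  }

  // Transfer to ViennaCL types:
  viennacl::compressed_matrix<double> A(N, N);
  viennacl::vector<double> b(N);
  viennacl::copy(cpu_A, A);
  viennacl::copy(cpu_b, b);

  // Solver configuration: relative tolerance 1e-10, at most 300 iterations,
  // restart after 150 iterations (cf. the bicgstab_tag constructor in the header):
  viennacl::linalg::bicgstab_tag my_tag(1e-10, 300, 150);

  // Unpreconditioned solve; iteration count and error estimate are written back to the tag:
  viennacl::vector<double> x = viennacl::linalg::solve(A, b, my_tag);

  std::cout << "BiCGStab iterations: " << my_tag.iters()
            << ", estimated relative error: " << my_tag.error() << std::endl;
  return 0;
}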

The actual contents of the file can be viewed below.

#ifndef VIENNACL_LINALG_BICGSTAB_HPP_
#define VIENNACL_LINALG_BICGSTAB_HPP_

/* =========================================================================
   Copyright (c) 2010-2014, Institute for Microelectronics,
                            Institute for Analysis and Scientific Computing,
                            TU Wien.
   Portions of this software are copyright by UChicago Argonne, LLC.

                            -----------------
                  ViennaCL - The Vienna Computing Library
                            -----------------

   Project Head:    Karl Rupp                   rupp@iue.tuwien.ac.at

   (A list of authors and contributors can be found in the PDF manual)

   License:         MIT (X11), see file LICENSE in the base directory
============================================================================= */

/** @file bicgstab.hpp
    @brief The stabilized bi-conjugate gradient method is implemented here
*/

#include <vector>
#include <cmath>
#include "viennacl/forwards.h"
#include "viennacl/tools/tools.hpp"
#include "viennacl/linalg/prod.hpp"
#include "viennacl/linalg/inner_prod.hpp"
#include "viennacl/linalg/norm_2.hpp"
#include "viennacl/traits/clear.hpp"
#include "viennacl/traits/size.hpp"
#include "viennacl/meta/result_of.hpp"

namespace viennacl
{
  namespace linalg
  {

    /** @brief A tag for the stabilized Bi-conjugate gradient solver. Used for supplying solver parameters and for dispatching the solve() function
    */
    class bicgstab_tag
    {
      public:
        /** @brief The constructor
        *
        * @param tol              Relative tolerance for the residual (solver quits if ||r|| < tol * ||r_initial||)
        * @param max_iters        The maximum number of iterations
        * @param max_iters_before_restart   The maximum number of iterations before BiCGStab is reinitialized (to avoid accumulation of round-off errors)
        */
        bicgstab_tag(double tol = 1e-8, vcl_size_t max_iters = 400, vcl_size_t max_iters_before_restart = 200)
          : tol_(tol), iterations_(max_iters), iterations_before_restart_(max_iters_before_restart) {}

        /** @brief Returns the relative tolerance */
        double tolerance() const { return tol_; }
        /** @brief Returns the maximum number of iterations */
        vcl_size_t max_iterations() const { return iterations_; }
        /** @brief Returns the maximum number of iterations before a restart */
        vcl_size_t max_iterations_before_restart() const { return iterations_before_restart_; }

        /** @brief Returns the number of solver iterations */
        vcl_size_t iters() const { return iters_taken_; }
        /** @brief Sets the number of solver iterations (written by the solver) */
        void iters(vcl_size_t i) const { iters_taken_ = i; }

        /** @brief Returns the estimated relative error at the end of the solver run */
        double error() const { return last_error_; }
        /** @brief Sets the estimated relative error at the end of the solver run */
        void error(double e) const { last_error_ = e; }

      private:
        double tol_;
        vcl_size_t iterations_;
        vcl_size_t iterations_before_restart_;

        //return values from solver
        mutable vcl_size_t iters_taken_;
        mutable double last_error_;
    };


    /** @brief Implementation of the stabilized Bi-conjugate gradient solver
    *
    * Following the description in "Iterative Methods for Sparse Linear Systems" by Y. Saad
    *
    * @param matrix     The system matrix
    * @param rhs        The load vector
    * @param tag        Solver configuration tag
    * @return The result vector
    */
    template <typename MatrixType, typename VectorType>
    VectorType solve(const MatrixType & matrix, VectorType const & rhs, bicgstab_tag const & tag)
    {
      typedef typename viennacl::result_of::value_type<VectorType>::type        ScalarType;
      typedef typename viennacl::result_of::cpu_value_type<ScalarType>::type    CPU_ScalarType;
      VectorType result = rhs;
      viennacl::traits::clear(result);

      VectorType residual = rhs;
      VectorType p = rhs;
      VectorType r0star = rhs;
      VectorType tmp0 = rhs;
      VectorType tmp1 = rhs;
      VectorType s = rhs;

      CPU_ScalarType norm_rhs_host = viennacl::linalg::norm_2(residual);
      CPU_ScalarType ip_rr0star = norm_rhs_host * norm_rhs_host;
      CPU_ScalarType beta;
      CPU_ScalarType alpha;
      CPU_ScalarType omega;
      //ScalarType inner_prod_temp; //temporary variable for inner product computation
      CPU_ScalarType new_ip_rr0star = 0;
      CPU_ScalarType residual_norm = norm_rhs_host;

      if (norm_rhs_host == 0) //solution is zero if RHS norm is zero
        return result;

      bool restart_flag = true;
      vcl_size_t last_restart = 0;
      for (vcl_size_t i = 0; i < tag.max_iterations(); ++i)
      {
        if (restart_flag)
        {
          residual = rhs;
          residual -= viennacl::linalg::prod(matrix, result);
          p = residual;
          r0star = residual;
          ip_rr0star = viennacl::linalg::norm_2(residual);
          ip_rr0star *= ip_rr0star;
          restart_flag = false;
          last_restart = i;
        }

        tag.iters(i+1);
        tmp0 = viennacl::linalg::prod(matrix, p);
        alpha = ip_rr0star / viennacl::linalg::inner_prod(tmp0, r0star);

        s = residual - alpha*tmp0;

        tmp1 = viennacl::linalg::prod(matrix, s);
        CPU_ScalarType norm_tmp1 = viennacl::linalg::norm_2(tmp1);
        omega = viennacl::linalg::inner_prod(tmp1, s) / (norm_tmp1 * norm_tmp1);

        result += alpha * p + omega * s;
        residual = s - omega * tmp1;

        new_ip_rr0star = viennacl::linalg::inner_prod(residual, r0star);
        residual_norm = viennacl::linalg::norm_2(residual);
        if (std::fabs(residual_norm / norm_rhs_host) < tag.tolerance())
          break;

        beta = new_ip_rr0star / ip_rr0star * alpha/omega;
        ip_rr0star = new_ip_rr0star;

        if (ip_rr0star == 0 || omega == 0 || i - last_restart > tag.max_iterations_before_restart()) //search direction degenerate. A restart might help
          restart_flag = true;

        // Execution of
        //  p = residual + beta * (p - omega*tmp0);
        // without introducing temporary vectors:
        p -= omega * tmp0;
        p = residual + beta * p;
      }

      //store last error estimate:
      tag.error(residual_norm / norm_rhs_host);

      return result;
    }

    template <typename MatrixType, typename VectorType>
    VectorType solve(const MatrixType & matrix, VectorType const & rhs, bicgstab_tag const & tag, viennacl::linalg::no_precond)
    {
      return solve(matrix, rhs, tag);
    }

    /** @brief Implementation of the preconditioned stabilized Bi-conjugate gradient solver
    *
    * Following the description of the unpreconditioned case in "Iterative Methods for Sparse Linear Systems" by Y. Saad
    *
    * @param matrix     The system matrix
    * @param rhs        The load vector
    * @param tag        Solver configuration tag
    * @param precond    A preconditioner. Precondition operation is done via member function apply()
    * @return The result vector
    */
    template <typename MatrixType, typename VectorType, typename PreconditionerType>
    VectorType solve(const MatrixType & matrix, VectorType const & rhs, bicgstab_tag const & tag, PreconditionerType const & precond)
    {
      typedef typename viennacl::result_of::value_type<VectorType>::type        ScalarType;
      typedef typename viennacl::result_of::cpu_value_type<ScalarType>::type    CPU_ScalarType;
      VectorType result = rhs;
      viennacl::traits::clear(result);

      VectorType residual = rhs;
      VectorType r0star = residual;  //can be chosen arbitrarily in fact
      VectorType tmp0 = rhs;
      VectorType tmp1 = rhs;
      VectorType s = rhs;

      VectorType p = residual;

      CPU_ScalarType ip_rr0star = viennacl::linalg::norm_2(residual);
      CPU_ScalarType norm_rhs_host = viennacl::linalg::norm_2(residual);
      CPU_ScalarType beta;
      CPU_ScalarType alpha;
      CPU_ScalarType omega;
      CPU_ScalarType new_ip_rr0star = 0;
      CPU_ScalarType residual_norm = norm_rhs_host;

      if (norm_rhs_host == 0) //solution is zero if RHS norm is zero
        return result;

      bool restart_flag = true;
      vcl_size_t last_restart = 0;
      for (vcl_size_t i = 0; i < tag.max_iterations(); ++i)
      {
        if (restart_flag)
        {
          residual = rhs;
          residual -= viennacl::linalg::prod(matrix, result);
          precond.apply(residual);
          p = residual;
          r0star = residual;
          ip_rr0star = viennacl::linalg::norm_2(residual);
          ip_rr0star *= ip_rr0star;
          restart_flag = false;
          last_restart = i;
        }

        tag.iters(i+1);
        tmp0 = viennacl::linalg::prod(matrix, p);
        precond.apply(tmp0);
        alpha = ip_rr0star / viennacl::linalg::inner_prod(tmp0, r0star);

        s = residual - alpha*tmp0;

        tmp1 = viennacl::linalg::prod(matrix, s);
        precond.apply(tmp1);
        CPU_ScalarType norm_tmp1 = viennacl::linalg::norm_2(tmp1);
        omega = viennacl::linalg::inner_prod(tmp1, s) / (norm_tmp1 * norm_tmp1);

        result += alpha * p + omega * s;
        residual = s - omega * tmp1;

        residual_norm = viennacl::linalg::norm_2(residual);
        if (residual_norm / norm_rhs_host < tag.tolerance())
          break;

        new_ip_rr0star = viennacl::linalg::inner_prod(residual, r0star);

        beta = new_ip_rr0star / ip_rr0star * alpha/omega;
        ip_rr0star = new_ip_rr0star;

        if (ip_rr0star == 0 || omega == 0 || i - last_restart > tag.max_iterations_before_restart()) //search direction degenerate. A restart might help
          restart_flag = true;

        // Execution of
        //  p = residual + beta * (p - omega*tmp0);
        // without introducing temporary vectors:
        p -= omega * tmp0;
        p = residual + beta * p;

        //std::cout << "Rel. Residual in current step: " << std::sqrt(std::fabs(viennacl::linalg::inner_prod(residual, residual) / norm_rhs_host)) << std::endl;
      }

      //store last error estimate:
      tag.error(residual_norm / norm_rhs_host);

      return result;
    }

  }
}

#endif
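
The preconditioned overload takes a preconditioner object as a fourth argument and calls its apply() member on the residual and on each matrix-vector product. A minimal continuation of the sketch above, assuming the ILUT preconditioner shipped in viennacl/linalg/ilu.hpp and reusing the A, b, and my_tag objects assembled earlier:

#include "viennacl/linalg/ilu.hpp"   // ilut_precond / ilut_tag

// ... A, b, and my_tag set up as in the earlier sketch ...

// Build an incomplete-LU (ILUT) preconditioner for A with default parameters:
viennacl::linalg::ilut_precond<viennacl::compressed_matrix<double> > ilut(A, viennacl::linalg::ilut_tag());

// Same call as before, with the preconditioner passed as the fourth argument:
viennacl::vector<double> x_precond = viennacl::linalg::solve(A, b, my_tag, ilut);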