This file is indexed.

/usr/share/pyshared/mpmath/calculus/approximation.py is in python-mpmath 0.18-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

from ..libmp.backend import xrange
from .calculus import defun

#----------------------------------------------------------------------------#
#                              Approximation methods                         #
#----------------------------------------------------------------------------#

# The Chebyshev approximation formula is given at:
# http://mathworld.wolfram.com/ChebyshevApproximationFormula.html

# The only major changes in the following code are that we return the
# expanded polynomial coefficients instead of Chebyshev coefficients,
# and that we automatically transform [a,b] -> [-1,1] and back
# for convenience.

# Coefficient in Chebyshev approximation
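# For reference, with h = 1/2 the sum below implements the discrete
# Chebyshev coefficient formula, with the nodes mapped affinely from
# [-1, 1] to [a, b]:
#
#     x_k = cos(pi*(k-1/2)/N)*(b-a)/2 + (b+a)/2,           k = 1, ..., N
#     c_j = (2/N) * sum_{k=1}^{N} f(x_k)*cos(pi*j*(k-1/2)/N)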
def chebcoeff(ctx,f,a,b,j,N):
    s = ctx.mpf(0)
    h = ctx.mpf(0.5)
    for k in range(1, N+1):
        t = ctx.cospi((k-h)/N)
        s += f(t*(b-a)*h + (b+a)*h) * ctx.cospi(j*(k-h)/N)
    return 2*s/N

# Generate Chebyshev polynomials T_n(ax+b) in expanded form
def chebT(ctx, a=1, b=0):
    Tb = [1]
    yield Tb
    Ta = [b, a]
    while True:
        yield Ta
        # Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b)
        Tmp = [0] + [2*a*t for t in Ta]
        for i, c in enumerate(Ta): Tmp[i] += 2*b*c
        for i, c in enumerate(Tb): Tmp[i] -= c
        Ta, Tb = Tmp, Ta
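
# For illustration: with the default identity map (a=1, b=0), the first
# three coefficient lists produced are T_0(x) = 1, T_1(x) = x and
# T_2(x) = 2*x**2 - 1, given in ascending powers of x (ctx is not used
# by chebT):
#
#     >>> from itertools import islice
#     >>> list(islice(chebT(None), 3))
#     [[1], [0, 1], [-1, 0, 2]]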

@defun
def chebyfit(ctx, f, interval, N, error=False):
    r"""
    Computes a polynomial of degree `N-1` that approximates the
    given function `f` on the interval `[a, b]`. With ``error=True``,
    :func:`~mpmath.chebyfit` also returns an accurate estimate of the
    maximum absolute error; that is, the maximum value of
    `|f(x) - P(x)|` for `x \in [a, b]`.

    :func:`~mpmath.chebyfit` uses the Chebyshev approximation formula,
    which gives a nearly optimal solution: that is, the maximum
    error of the approximating polynomial is very close to
    the smallest possible for any polynomial of the same degree.

    Chebyshev approximation is very useful if one needs repeated
    evaluation of an expensive function, such as a function defined
    implicitly by an integral or a differential equation. (For
    example, it could be used to turn a slow mpmath function
    into a fast machine-precision version of the same.)

    **Examples**

    Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation
    of `f(x) = \cos(x)`, valid on the interval `[1, 2]`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> poly, err = chebyfit(cos, [1, 2], 5, error=True)
        >>> nprint(poly)
        [0.00291682, 0.146166, -0.732491, 0.174141, 0.949553]
        >>> nprint(err, 12)
        1.61351758081e-5

    The polynomial can be evaluated using ``polyval``::

        >>> nprint(polyval(poly, 1.6), 12)
        -0.0291858904138
        >>> nprint(cos(1.6), 12)
        -0.0291995223013

    Sampling the true error at 1000 points shows that the error
    estimate generated by ``chebyfit`` is remarkably good::

        >>> error = lambda x: abs(cos(x) - polyval(poly, x))
        >>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12)
        1.61349954245e-5

    **Choice of degree**

    The number of terms `N` can be set arbitrarily high, to obtain an
    arbitrarily good approximation. As a rule of thumb, an
    `N`-term Chebyshev approximation is good to `N/(b-a)` decimal
    places on a unit interval (although this depends on how
    well-behaved `f` is). The cost grows accordingly: ``chebyfit``
    evaluates the function `N^2` times to compute the
    coefficients and an additional `N` times to estimate the error.

    **Possible issues**

    One should be careful to use a sufficiently high working
    precision both when calling ``chebyfit`` and when evaluating
    the resulting polynomial, as the polynomial is sometimes
    ill-conditioned. It is for example difficult to reach
    15-digit accuracy when evaluating the polynomial using
    machine precision floats, no matter the theoretical
    accuracy of the polynomial. (The option to return the
    coefficients in Chebyshev form should be made available
    in the future.)

    It is important to note that the Chebyshev approximation works
    poorly if `f` is not smooth. A function containing singularities,
    rapid oscillation, etc. can be approximated more effectively by
    multiplying it by a weight function that cancels out the
    nonsmooth features, or by dividing the interval into several
    segments.
    """
    a, b = ctx._as_points(interval)
    orig = ctx.prec
    try:
        ctx.prec = orig + int(N**0.5) + 20
        c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)]
        d = [ctx.zero] * N
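        # The approximation has the form sum(c[k]*T_k(x) for k < N) - c[0]/2;
        # seeding the constant term with -c[0]/2 lets the k = 0 pass of the
        # accumulation loop below (which adds c[0]*T_0 = c[0]) leave c[0]/2.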
        d[0] = -c[0]/2
        h = ctx.mpf(0.5)
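        # chebT is seeded with the affine map x -> (2*x - (b+a))/(b-a), which
        # sends [a, b] onto [-1, 1], so the generated T_k come out expanded
        # directly in powers of x.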
        T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a))
        for (k, Tk) in zip(range(N), T):
            for i in range(len(Tk)):
                d[i] += c[k]*Tk[i]
        d = d[::-1]
        # Estimate maximum error
        err = ctx.zero
        for k in range(N):
            x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h
            err = max(err, abs(f(x) - ctx.polyval(d, x)))
    finally:
        ctx.prec = orig
    if error:
        return d, +err
    else:
        return d

@defun
def fourier(ctx, f, interval, N):
    r"""
    Computes the Fourier series of degree `N` of the given function
    on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns
    two lists `(c, s)` of coefficients (the cosine series and sine
    series, respectively), such that

    .. math ::

        f(x) \sim \sum_{k=0}^N
            c_k \cos(k m x) + s_k \sin(k m x)

    where `m = 2 \pi / (b-a)`.

    Note that many texts define the first coefficient as `2 c_0` instead
    of `c_0`. The easiest way to evaluate the computed series correctly
    is to pass it to :func:`~mpmath.fourierval`.

    **Examples**

    The function `f(x) = x` has a simple Fourier series on the standard
    interval `[-\pi, \pi]`. The cosine coefficients are all zero (because
    the function has odd symmetry), and the sine coefficients are
    rational numbers::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> c, s = fourier(lambda x: x, [-pi, pi], 5)
        >>> nprint(c)
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        >>> nprint(s)
        [0.0, 2.0, -1.0, 0.666667, -0.5, 0.4]

    This computes a Fourier series of a nonsymmetric function on
    a nonstandard interval::

        >>> I = [-1, 1.5]
        >>> f = lambda x: x**2 - 4*x + 1
        >>> cs = fourier(f, I, 4)
        >>> nprint(cs[0])
        [0.583333, 1.12479, -1.27552, 0.904708, -0.441296]
        >>> nprint(cs[1])
        [0.0, -2.6255, 0.580905, 0.219974, -0.540057]

    It is instructive to plot a function along with its truncated
    Fourier series::

        >>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP

    Fourier series generally converge slowly (and may not converge
    pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier
    series gives an `L^2` error corresponding to 2-digit accuracy::

        >>> I = [-1, 1]
        >>> cs = fourier(cosh, I, 9)
        >>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2
        >>> nprint(sqrt(quad(g, I)))
        0.00467963

    :func:`~mpmath.fourier` uses numerical quadrature. For nonsmooth functions,
    the accuracy (and speed) can be improved by including all singular
    points in the interval specification::

        >>> nprint(fourier(abs, [-1, 1], 0), 10)
        ([0.5000441648], [0.0])
        >>> nprint(fourier(abs, [-1, 0, 1], 0), 10)
        ([0.5], [0.0])

    """
    interval = ctx._as_points(interval)
    a = interval[0]
    b = interval[-1]
    L = b-a
    cos_series = []
    sin_series = []
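    # Coefficients whose magnitude falls below ~10*eps are chopped to exact
    # zeros, presumably to suppress quadrature noise in terms that vanish
    # by symmetry.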
    cutoff = ctx.eps*10
    for n in xrange(N+1):
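        # Angular frequency of the n-th harmonic: n*2*pi/(b-a).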
        m = 2*n*ctx.pi/L
        an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), interval)/L
        bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), interval)/L
        if n == 0:
            an /= 2
        if abs(an) < cutoff: an = ctx.zero
        if abs(bn) < cutoff: bn = ctx.zero
        cos_series.append(an)
        sin_series.append(bn)
    return cos_series, sin_series

@defun
def fourierval(ctx, series, interval, x):
    """
    Evaluates a Fourier series (in the format computed by
    :func:`~mpmath.fourier` for the given interval) at the point `x`.

    The series should be a pair `(c, s)` where `c` is the
    cosine series and `s` is the sine series. The two lists
    need not have the same length.
    """
    cs, ss = series
    ab = ctx._as_points(interval)
    m = 2*ctx.pi/(ab[-1]-ab[0])
    s = ctx.zero
    s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n])
    s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n])
    return s