/usr/lib/python2.7/dist-packages/openturns/uncertainty.py is in python-openturns 1.7-3.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.




"""
Probabilistic meta-package.
"""


from sys import version_info
if version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_uncertainty', [dirname(__file__)])
        except ImportError:
            import _uncertainty
            return _uncertainty
        if fp is not None:
            try:
                _mod = imp.load_module('_uncertainty', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _uncertainty = swig_import_helper()
    del swig_import_helper
else:
    import _uncertainty
del version_info
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.


def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)


def _swig_setattr(self, class_type, name, value):
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)


def _swig_getattr_nondynamic(self, class_type, name, static=1):
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)

def _swig_getattr(self, class_type, name):
    return _swig_getattr_nondynamic(self, class_type, name, 0)


def _swig_repr(self):
    try:
        strthis = "proxy of " + self.this.__repr__()
    except:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)

try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0


class SwigPyIterator(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _uncertainty.delete_SwigPyIterator
    __del__ = lambda self: None

    def value(self):
        return _uncertainty.SwigPyIterator_value(self)

    def incr(self, n=1):
        return _uncertainty.SwigPyIterator_incr(self, n)

    def decr(self, n=1):
        return _uncertainty.SwigPyIterator_decr(self, n)

    def distance(self, x):
        return _uncertainty.SwigPyIterator_distance(self, x)

    def equal(self, x):
        return _uncertainty.SwigPyIterator_equal(self, x)

    def copy(self):
        return _uncertainty.SwigPyIterator_copy(self)

    def next(self):
        return _uncertainty.SwigPyIterator_next(self)

    def __next__(self):
        return _uncertainty.SwigPyIterator___next__(self)

    def previous(self):
        return _uncertainty.SwigPyIterator_previous(self)

    def advance(self, n):
        return _uncertainty.SwigPyIterator_advance(self, n)

    def __eq__(self, x):
        return _uncertainty.SwigPyIterator___eq__(self, x)

    def __ne__(self, x):
        return _uncertainty.SwigPyIterator___ne__(self, x)

    def __iadd__(self, n):
        return _uncertainty.SwigPyIterator___iadd__(self, n)

    def __isub__(self, n):
        return _uncertainty.SwigPyIterator___isub__(self, n)

    def __add__(self, n):
        return _uncertainty.SwigPyIterator___add__(self, n)

    def __sub__(self, *args):
        return _uncertainty.SwigPyIterator___sub__(self, *args)
    def __iter__(self):
        return self
SwigPyIterator_swigregister = _uncertainty.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)


_uncertainty.GCC_VERSION_swigconstant(_uncertainty)
GCC_VERSION = _uncertainty.GCC_VERSION

class TestFailed:
    """TestFailed is used to raise an uniform exception in tests."""

    __type = "TestFailed"

    def __init__(self, reason=""):
        self.reason = reason

    def type(self):
        return TestFailed.__type

    def what(self):
        return self.reason

    def __str__(self):
        return TestFailed.__type + ": " + self.reason

    def __lshift__(self, ch):
        self.reason += ch
        return self
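
# A minimal, hypothetical usage sketch (not part of the generated module): an
# OpenTURNS test script typically raises TestFailed when a numerical check fails,
# e.g.
#
#     computed, expected = compute_mean(), 1.0   # compute_mean is hypothetical
#     if abs(computed - expected) > 1e-10:
#         raise TestFailed('mean does not match the reference value')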

import openturns.base
import openturns.common
import openturns.typ
import openturns.statistics
import openturns.graph
import openturns.func
import openturns.geom
import openturns.diff
import openturns.optim
import openturns.solver
import openturns.algo
import openturns.experiment
import openturns.model_copula
import openturns.randomvector
import openturns.dist_bundle1
import openturns.dist_bundle2
import openturns.weightedexperiment
import openturns.classification
import openturns.orthogonalbasis
import openturns.metamodel
class QuadraticCumul(openturns.common.PersistentObject):
    """
    First and second order quadratic cumul formulas.

    Available constructors:
        QuadraticCumul(*limitStateVariable*)

    Parameters
    ----------
    limitStateVariable : :class:`~openturns.RandomVector`
        This RandomVector must be of type *Composite*, which means it must have
        been defined from a NumericalMathFunction and an antecedent Distribution
        (the fourth form of the RandomVector constructor) or with the class
        :class:`~openturns.CompositeRandomVector`.

    Notes
    -----
    The quadratic cumul is a probabilistic approach designed to propagate the
    uncertainties of the input variables :math:`\\uX` through the model :math:`h`
    towards the output variables :math:`\\uY`. It gives access to the central
    dispersion (expectation, variance) of the output variables.

    This method is based on a Taylor decomposition of the output variable
    :math:`\\uY` with respect to the random vector :math:`\\uX` around the mean
    point :math:`\\muX`. Depending on the order of the Taylor decomposition
    (classically first or second order), one obtains the formulas introduced
    hereafter.

    As :math:`\\uY=h(\\uX)`, the second order Taylor decomposition around
    :math:`\\ux = \\muX` yields:

    .. math::

        \\uY = h(\\muX) + <\\vect{\\vect{\\nabla}}h(\\muX) , \\: \\uX - \\muX> + \\frac{1}{2}<<\\vect{\\vect{\\vect{\\nabla }}}^2 h(\\muX,\\: \\vect{\\mu}_{\\:X}),\\: \\uX - \\muX>,\\: \\uX - \\muX> + o(\\Cov \\uX)

    where:

    - :math:`\\muX = \\Expect{\\uX}` is the mean vector of the input variables.

    - :math:`\\Cov \\uX` is the covariance matrix of the random vector :math:`\\uX`,
      whose elements are
      :math:`(\\Cov \\uX)_{ij} = \\Expect{\\left(X^i - \\Expect{X^i}\\right)\\left(X^j - \\Expect{X^j}\\right)}`

    - :math:`\\vect{\\vect{\\nabla}} h(\\muX) = \\: \\Tr{\\left( \\frac{\\partial y^i}{\\partial x^j}\\right)}_{\\ux\\: =\\: \\muX} = \\: \\Tr{\\left( \\frac{\\partial h^i(\\ux)}{\\partial x^j}\\right)}_{\\ux\\: =\\: \\muX}`
      is the transposed Jacobian matrix with :math:`i=1,\\ldots,n_Y` and
      :math:`j=1,\\ldots,n_X`.

    - :math:`\\vect{\\vect{\\vect{\\nabla^2}}} h(\\ux\\:,\\ux)` is a tensor of order 3,
      made of the second order derivatives of the :math:`k^\\textrm{th}` component
      of the output vector :math:`h(\\ux)` with respect to the :math:`i^\\textrm{th}`
      and :math:`j^\\textrm{th}` components of :math:`\\ux`:
      :math:`\\left( \\nabla^2 h(\\ux) \\right)_{ijk} = \\frac{\\partial^2 (h^k(\\ux))}{\\partial x^i \\partial x^j}`

    - :math:`<\\vect{\\vect{\\nabla}}h(\\muX) , \\: \\uX - \\muX> = \\sum_{j=1}^{n_X} \\left( \\frac{\\partial {\\uy}}{\\partial {x^j}}\\right)_{\\ux = \\muX} . \\left( X^j-\\muX^j \\right)`

    -
      .. math::

          <<\\vect{\\vect{\\vect{\\nabla }}}^2 h(\\muX,\\: \\vect{\\mu}_{X}),\\: \\uX - \\muX>,\\: \\uX - \\muX> = \\left( \\Tr{(\\uX^i - \\muX^i)}. \\left(\\frac{\\partial^2 y^k}{\\partial x^i \\partial x^j}\\right)_{\\ux = \\muX}. (\\uX^j - \\muX^j) \\right)_{ijk}

    **Approximation at the order 1:**

    Expectation:

    .. math::

        \\Expect{\\uY} \\approx \\vect{h}(\\muX)

    Note that :math:`\\Expect{\\uY}` is a vector: its :math:`k^\\textrm{th}`
    component is the :math:`k^\\textrm{th}` component of the output vector
    computed by the model :math:`h` at the mean point.
    :math:`\\Expect{\\uY}` is thus the evaluation of the model at the mean of the inputs.

    Variance:

    .. math::

        \\Cov \\uY \\approx \\Tr{\\vect{\\vect{\\nabla}}}\\:\\vect{h}(\\muX).\\Cov \\uX.\\vect{\\vect{\\nabla}}\\:\\vect{h}(\\muX)

    **Approximation at the order 2:**

    Expectation:

    .. math::

        (\\Expect{\\uY})_k \\approx (\\vect{h}(\\muX))_k +
                                  \\left(
                                  \\sum_{i=1}^{n_X}\\frac{1}{2} (\\Cov \\uX)_{ii}.{(\\nabla^2\\:h(\\uX))}_{iik} +
                                  \\sum_{i=1}^{n_X} \\sum_{j=1}^{i-1} (\\Cov X)_{ij}.{(\\nabla^2\\:h(\\uX))}_{ijk}
                                  \\right)_k

    Variance:

    The second order decomposition of the variance is not implemented in the
    standard version of OpenTURNS: it would require both higher order derivatives
    of the model and moments of order strictly greater than 2 of the input
    distribution.

    Examples
    --------
    >>> import openturns as ot
    >>> ot.RandomGenerator.SetSeed(0)
    >>> myFunc = ot.NumericalMathFunction(['x1', 'x2', 'x3', 'x4'], ['y1', 'y2'],
    ...     ['(x1*x1+x2^3*x1)/(2*x3*x3+x4^4+1)', 'cos(x2*x2+x4)/(x1*x1+1+x3^4)'])
    >>> R = ot.CorrelationMatrix(4)
    >>> for i in range(4):
    ...     R[i, i - 1] = 0.25
    >>> distribution = ot.Normal([0.2]*4, [0.1, 0.2, 0.3, 0.4], R)
    >>> # We create a distribution-based RandomVector
    >>> X = ot.RandomVector(distribution)
    >>> # We create a composite RandomVector Y from X and myFunc
    >>> Y = ot.RandomVector(myFunc, X)
    >>> # We create a quadraticCumul algorithm
    >>> myQuadraticCumul = ot.QuadraticCumul(Y)
    >>> print(myQuadraticCumul.getMeanFirstOrder())
    [0.0384615,0.932544]
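    >>> # Illustrative sketch: the other accessors follow the same pattern; their
    >>> # values depend on the model, so no output is reproduced here.
    >>> meanSecondOrder = myQuadraticCumul.getMeanSecondOrder()
    >>> covariance = myQuadraticCumul.getCovariance()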
    """
    __swig_setmethods__ = {}
    for _s in [openturns.common.PersistentObject]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, QuadraticCumul, name, value)
    __swig_getmethods__ = {}
    for _s in [openturns.common.PersistentObject]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, QuadraticCumul, name)

    def getClassName(self):
        """
        Accessor to the object's name.

        Returns
        -------
        class_name : str
            The object class name (`object.__class__.__name__`).
        """
        return _uncertainty.QuadraticCumul_getClassName(self)


    def __repr__(self):
        return _uncertainty.QuadraticCumul___repr__(self)

    def getLimitStateVariable(self):
        """
        Get the limit state variable.

        Returns
        -------
        limitStateVariable : :class:`~openturns.RandomVector`
            Limit state variable.
        """
        return _uncertainty.QuadraticCumul_getLimitStateVariable(self)


    def getMeanFirstOrder(self):
        """
        Get the approximation at the first order of the mean.

        Returns
        -------
        mean : :class:`~openturns.NumericalPoint`
            Approximation at the first order of the mean of the random vector.
        """
        return _uncertainty.QuadraticCumul_getMeanFirstOrder(self)


    def getMeanSecondOrder(self):
        """
        Get the approximation at the second order of the mean.

        Returns
        -------
        mean : :class:`~openturns.NumericalPoint`
            Approximation at the second order of the mean of the random vector
            (it requires that the hessian of the NumericalMathFunction has been defined).
        """
        return _uncertainty.QuadraticCumul_getMeanSecondOrder(self)


    def getCovariance(self):
        """
        Get the approximation at the first order of the covariance matrix.

        Returns
        -------
        covariance : :class:`~openturns.CovarianceMatrix`
            Approximation at the first order of the covariance matrix of the random
            vector.
        """
        return _uncertainty.QuadraticCumul_getCovariance(self)


    def getValueAtMean(self):
        """
        Get the value of the function.

        Returns
        -------
        value : :class:`~openturns.NumericalPoint`
            Value of the NumericalMathFunction which defines the random vector at
            the mean point of the input random vector.
        """
        return _uncertainty.QuadraticCumul_getValueAtMean(self)


    def getGradientAtMean(self):
        """
        Get the gradient of the function.

        Returns
        -------
        gradient : :class:`~openturns.Matrix`
            Gradient of the NumericalMathFunction which defines the random vector at
            the mean point of the input random vector.
        """
        return _uncertainty.QuadraticCumul_getGradientAtMean(self)


    def getHessianAtMean(self):
        """
        Get the hessian of the function.

        Returns
        -------
        hessian : :class:`~openturns.SymmetricTensor`
            Hessian of the NumericalMathFunction which defines the random vector at
            the mean point of the input random vector.
        """
        return _uncertainty.QuadraticCumul_getHessianAtMean(self)


    def getImportanceFactors(self):
        """
        Get the importance factors.

        Returns
        -------
        factors : :class:`~openturns.NumericalPoint`
            Importance factors of the inputs; they are only defined when the random
            vector is of dimension 1.
        """
        return _uncertainty.QuadraticCumul_getImportanceFactors(self)


    def drawImportanceFactors(self):
        """
        Draw the importance factors.

        Returns
        -------
        graph : :class:`~openturns.Graph`
            Graph containing the pie chart of the importance factors of the
            probabilistic variables.
        """
        return _uncertainty.QuadraticCumul_drawImportanceFactors(self)


    def __init__(self, *args):
        this = _uncertainty.new_QuadraticCumul(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    __swig_destroy__ = _uncertainty.delete_QuadraticCumul
    __del__ = lambda self: None
QuadraticCumul_swigregister = _uncertainty.QuadraticCumul_swigregister
QuadraticCumul_swigregister(QuadraticCumul)

class ANCOVA(_object):
    """
    ANalysis of COVAriance method (ANCOVA).

    Available constructor:
        ANCOVA(*functionalChaosResult, correlatedInput*)

    Parameters
    ----------
    functionalChaosResult : :class:`~openturns.FunctionalChaosResult`
        Functional chaos result approximating the model response with
        uncorrelated inputs.
    correlatedInput : 2-d sequence of float
        Correlated inputs used to compute the real values of the output.
        Its dimension must be equal to the number of inputs of the model.

    Notes
    -----
    ANCOVA, a variance-based method described in [Caniou2012]_, is a generalization
    of the ANOVA (ANalysis Of VAriance) decomposition for models with correlated
    input parameters.

    Let us consider a model :math:`Y = h(\\vect{X})` without making any hypothesis
    on the dependence structure of :math:`\\vect{X} = \\{X^1, \\ldots, X^{n_X} \\}`, an
    :math:`n_X`-dimensional random vector. The covariance decomposition requires a
    functional decomposition of the model: the model response :math:`Y` is expanded
    as a sum of functions of increasing dimension as follows:

    .. math::
        :label: model

        h(\\vect{X}) = h_0 + \\sum_{u\\subseteq\\{1,\\dots,n_X\\}} h_u(X_u)

    :math:`h_0` is the mean of :math:`Y`. Each function :math:`h_u` represents,
    for any non empty set :math:`u\\subseteq\\{1, \\dots, n_X\\}`, the combined
    contribution of the variables :math:`X_u` to :math:`Y`.

    Using the properties of the covariance, the variance of :math:`Y` can be
    decomposed into a variance part and a covariance part as follows:

    .. math::

        Var[Y]&= Cov\\left[h_0 + \\sum_{u\\subseteq\\{1,\\dots,n_X\\}} h_u(X_u), h_0 + \\sum_{u\\subseteq\\{1,\\dots,n_X\\}} h_u(X_u)\\right] \\\\
              &= \\sum_{u\\subseteq\\{1,\\dots,n_X\\}} \\left[Var[h_u(X_u)] + Cov[h_u(X_u), \\sum_{v\\subseteq\\{1,\\dots,n_X\\}, v\\cap u=\\varnothing} h_v(X_v)]\\right]

    This variance formula makes it possible to define the total part of the variance
    of :math:`Y` due to :math:`X_u`, denoted :math:`S_u`, as the sum of a *physical*
    (or *uncorrelated*) part and a *correlated* part:

    .. math::

        S_u = \\frac{Cov[Y, h_u(X_u)]} {Var[Y]} = S_u^U + S_u^C

    where :math:`S_u^U` is the uncorrelated part of variance of Y due to :math:`X_u`:

    .. math::

        S_u^U = \\frac{Var[h_u(X_u)]} {Var[Y]}

    and :math:`S_u^C` is the contribution of the correlation of :math:`X_u` with the
    other parameters:

    .. math::

        S_u^C = \\frac{Cov\\left[h_u(X_u), \\displaystyle \\sum_{v\\subseteq\\{1,\\dots,n_X\\}, v\\cap u=\\varnothing} h_v(X_v)\\right]}
                     {Var[Y]}

    As the computational cost of the indices with the numerical model :math:`h`
    can be very high, [Caniou2012]_ suggests approximating the model response with
    a polynomial chaos expansion:

    .. math::

        Y \\simeq \\hat{h} = \\sum_{j=0}^{P-1} \\alpha_j \\Psi_j(x)

    However, for the sake of computational simplicity, the latter is constructed
    assuming *independent* components :math:`\\{X^1,\\dots,X^{n_X}\\}`. Thus the
    chaos basis is not orthogonal with respect to the correlated inputs under
    consideration, and it is only used as a metamodel to generate approximate
    evaluations of the model response and its summands :eq:`model`.

    The next step is to identify the component functions. For instance, for
    :math:`u = \\{1\\}`:

    .. math::

        h_1(X_1) = \\sum_{\\alpha | \\alpha_1 \\neq 0, \\alpha_{i \\neq 1} = 0} y_{\\alpha} \\Psi_{\\alpha}(\\vect{X})

    where :math:`\\alpha` is a multi-index of degrees associated with the :math:`n_X`
    univariate polynomials :math:`\\psi_i^{\\alpha_i}(X_i)`.

    Then the model response :math:`Y` is evaluated using a sample
    :math:`X=\\{x_k, k=1,\\dots,N\\}` of the correlated joint distribution. Finally,
    the indices are computed using the model response and the component
    functions identified from the polynomial chaos expansion.

    Examples
    --------
    >>> import openturns as ot
    >>> ot.RandomGenerator.SetSeed(0)
    >>> # Model and distribution definition
    >>> model = ot.NumericalMathFunction(['X1','X2'], ['Y'], ['4.*X1 + 5.*X2'])
    >>> distribution = ot.ComposedDistribution([ot.Normal()] * 2)
    >>> S = ot.CorrelationMatrix(2)
    >>> S[1, 0] = 0.3
    >>> R = ot.NormalCopula().GetCorrelationFromSpearmanCorrelation(S)
    >>> CorrelatedInputDistribution = ot.ComposedDistribution([ot.Normal()] * 2, ot.NormalCopula(R))
    >>> sample = CorrelatedInputDistribution.getSample(2000)
    >>> # Functional chaos computation
    >>> productBasis = ot.OrthogonalProductPolynomialFactory([ot.HermiteFactory()] * 2, ot.EnumerateFunction(2))
    >>> adaptiveStrategy = ot.FixedStrategy(productBasis, 15)
    >>> projectionStrategy = ot.LeastSquaresStrategy(ot.MonteCarloExperiment(250))
    >>> algo = ot.FunctionalChaosAlgorithm(model, distribution, adaptiveStrategy, projectionStrategy)
    >>> algo.run()
    >>> ancovaResult = ot.ANCOVA(algo.getResult(), sample)
    >>> indices = ancovaResult.getIndices()
    >>> print(indices)
    [0.411077,0.588923]
    >>> uncorrelatedIndices = ancovaResult.getUncorrelatedIndices()
    >>> print(uncorrelatedIndices)
    [0.29868,0.476527]
    >>> # Get indices measuring the correlated effects
    >>> print(indices - uncorrelatedIndices)
    [0.112397,0.112397]
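    >>> # As stated in the Notes above, each index splits as S_u = S_u^U + S_u^C:
    >>> # the difference printed just above is the correlated part S_u^C of each input.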
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, ANCOVA, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, ANCOVA, name)
    __repr__ = _swig_repr

    def getUncorrelatedIndices(self, marginalIndex=0):
        """
        Accessor to the ANCOVA indices measuring uncorrelated effects.

        Parameters
        ----------
        marginalIndex : int, :math:`0 \\leq i < n`, optional
            Index of the model's marginal used to estimate the indices.
            By default, marginalIndex is equal to 0.

        Returns
        -------
        indices : :class:`~openturns.NumericalPoint`
            List of the ANCOVA indices measuring uncorrelated effects of the inputs.
            The effects of the correlation are represented by the indices resulting
            from the subtraction of the :meth:`getIndices` and
            :meth:`getUncorrelatedIndices` lists.
        """
        return _uncertainty.ANCOVA_getUncorrelatedIndices(self, marginalIndex)


    def getIndices(self, marginalIndex=0):
        """
        Accessor to the ANCOVA indices.

        Parameters
        ----------
        marginalIndex : int, :math:`0 \\leq i < n`, optional
            Index of the model's marginal used to estimate the indices.
            By default, marginalIndex is equal to 0.

        Returns
        -------
        indices : :class:`~openturns.NumericalPoint`
            List of the ANCOVA indices measuring the contribution of the
            input variables to the variance of the model. These indices are made up
            of a *physical* part and a *correlated* part. The first one is obtained
            thanks to :meth:`getUncorrelatedIndices`.
            The effects of the correlation are represented by the indices resulting
            from the subtraction of the :meth:`getIndices` and
            :meth:`getUncorrelatedIndices` lists.
        """
        return _uncertainty.ANCOVA_getIndices(self, marginalIndex)


    def __init__(self, *args):
        this = _uncertainty.new_ANCOVA(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    __swig_destroy__ = _uncertainty.delete_ANCOVA
    __del__ = lambda self: None
ANCOVA_swigregister = _uncertainty.ANCOVA_swigregister
ANCOVA_swigregister(ANCOVA)

class FAST(_object):
    """
    Fourier Amplitude Sensitivity Testing (FAST).

    Available constructor:
        FAST(*model, distribution, N, Nr=1, M=4*)

    Parameters
    ----------
    model : :class:`~openturns.NumericalMathFunction`
        Definition of the model to analyse.
    distribution : :class:`~openturns.Distribution`
        Contains the distributions of each model's input.
        Its dimension must be equal to the number of inputs.
    N : int, :math:`N > Nr`
        Size of the sample from which the Fourier series are calculated.
        It represents the length of the discretization of the s-space.
    Nr : int, :math:`Nr \\geq 1`
        Number of resamplings. The extended FAST method involves a part of
        randomness in the computation of the indices, so the procedure can be
        repeated *Nr* times and the indices taken as the arithmetic means of
        the results over the *Nr* estimates.
    M : int, :math:`0 < M < N`
        Interference factor usually equal to 4 or higher.
        It corresponds to the truncation level of the Fourier series, i.e. the
        number of harmonics that are retained in the decomposition.

    Notes
    -----
    FAST is a sensitivity analysis method which is based upon the ANOVA
    decomposition of the variance of the model response :math:`y = f(\\vect{X})`,
    the latter being represented by its Fourier expansion.
    :math:`\\vect{X}=\\{X^1,\\dots,X^{n_X}\\}` is an input random vector of :math:`n_X`
    independent components.

    OpenTURNS implements the extended FAST method, which computes alternately
    the first order and the total-effect indices of each input.
    This approach, described in detail in [Saltelli1999]_, relies upon a
    Fourier decomposition of the model response. Its key idea is to recast this
    representation as a function of a *scalar* parameter :math:`s`, by defining
    parametric curves :math:`s \\mapsto x_i(s), i=1, \\dots, n_X` exploring the
    support of the input random vector :math:`\\vect{X}`.

    Then the Fourier expansion of the model response is:

    .. math::

        f(s) = \\sum_{k \\in \\Zset^N} A_k \\cos(ks) + B_k \\sin(ks)

    where :math:`A_k` and :math:`B_k` are Fourier coefficients whose estimates are:

    .. math::

        \\hat{A}_k &= \\frac{1}{N} \\sum_{j=1}^N f(x_j^1,\\dots,x_j^{N_X}) \\cos\\left(\\frac{2k\\pi (j-1)}{N} \\right) \\quad , \\quad -\\frac{N}{2} \\leq k \\leq \\frac{N}{2} \\\\
        \\hat{B}_k &= \\frac{1}{N} \\sum_{j=1}^N f(x_j^1,\\dots,x_j^{N_X}) \\sin\\left(\\frac{2k\\pi (j-1)}{N} \\right) \\quad , \\quad -\\frac{N}{2} \\leq k \\leq \\frac{N}{2}


    The first order indices are estimated by:

    .. math::

        \\hat{S}_i = \\frac{\\hat{D}_i}{\\hat{D}}
                  = \\frac{\\sum_{p=1}^M(\\hat{A}_{p\\omega_i}^2 + \\hat{B}_{p\\omega_i}^2)^2}
                          {\\sum_{n=1}^{(N-1)/2}(\\hat{A}_n^2 + \\hat{B}_n^2)^2}

    and the total order indices by:

    .. math::

        \\hat{T}_i = 1 - \\frac{\\hat{D}_{-i}}{\\hat{D}}
                  = 1 - \\frac{\\sum_{k=1}^{\\omega_i/2}(\\hat{A}_k^2 + \\hat{B}_k^2)^2}
                              {\\sum_{n=1}^{(N-1)/2}(\\hat{A}_n^2 + \\hat{B}_n^2)^2}

    where :math:`\\hat{D}` is the total variance, :math:`\\hat{D}_i` the portion
    of :math:`D` arising from the uncertainty of the :math:`i^{th}` input and
    :math:`\\hat{D}_{-i}` is the part of the variance due to all the inputs
    except the :math:`i^{th}` input.

    :math:`N` is the size of the sample used to compute the Fourier series and
    :math:`M` is the interference factor. *Saltelli et al.* (1999) recommend
    setting :math:`M` to a value in the range :math:`[4, 6]`.
    :math:`\\{\\omega_i\\}, \\forall i=1, \\dots, n_X` is a set of integer frequencies
    assigned to each input :math:`X^i`. The frequency associated with the input
    for which the sensitivity indices are computed is set to the maximum admissible
    frequency satisfying the Nyquist criterion (which avoids aliasing effects):

    .. math::

        \\omega_i = \\frac{N - 1}{2M}

    For large sample sizes, Saltelli et al. (1999) suggest taking
    :math:`16 \\leq \\omega_i/N_r \\leq 64`.


    Examples
    --------
    >>> import openturns as ot
    >>> ot.RandomGenerator.SetSeed(0)
    >>> formulaIshigami = ['sin(_pi*X1)+7*sin(_pi*X2)*sin(_pi*X2)+0.1*((_pi*X3)*(_pi*X3)*(_pi*X3)*(_pi*X3))*sin(_pi*X1)']
    >>> modelIshigami = ot.NumericalMathFunction(['X1', 'X2', 'X3'], ['y'], formulaIshigami)
    >>> distributions = ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] * 3)
    >>> sensitivityAnalysis = ot.FAST(modelIshigami, distributions, 400)
    >>> print(sensitivityAnalysis.getFirstOrderIndices())
    [0.307461,0.442524,4.18878e-07]
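    >>> # Illustrative sketch: total-effect indices are obtained the same way;
    >>> # their values depend on N, Nr and M, so no output is reproduced here.
    >>> totalIndices = sensitivityAnalysis.getTotalOrderIndices()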
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, FAST, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, FAST, name)
    __repr__ = _swig_repr

    def getFirstOrderIndices(self, marginalIndex=0):
        """
        Accessor to the first order indices.

        Parameters
        ----------
        marginalIndex : int, :math:`0 \\leq i < n`, optional
            Index of the model's marginal used to estimate the indices.
            By default, marginalIndex is equal to 0.

        Returns
        -------
        indices : :class:`~openturns.NumericalPoint`
            List of the first order indices of all the inputs.
        """
        return _uncertainty.FAST_getFirstOrderIndices(self, marginalIndex)


    def getTotalOrderIndices(self, marginalIndex=0):
        """
        Accessor to the total order indices.

        Parameters
        ----------
        marginalIndex : int, :math:`0 \\leq i < n`, optional
            Index of the model's marginal used to estimate the indices.
            By default, marginalIndex is equal to 0.

        Returns
        -------
        indices : :class:`~openturns.NumericalPoint`
            List of the total-effect order indices of all the inputs.
        """
        return _uncertainty.FAST_getTotalOrderIndices(self, marginalIndex)


    def getFFTAlgorithm(self):
        """
        Accessor to the FFT algorithm implementation.

        Returns
        -------
        fft : :class:`~openturns.FFT`
            An FFT algorithm.
        """
        return _uncertainty.FAST_getFFTAlgorithm(self)


    def setFFTAlgorithm(self, fft):
        """
        Accessor to the FFT algorithm implementation.

        Parameters
        ----------
        fft : :class:`~openturns.FFT`
            An FFT algorithm.
        """
        return _uncertainty.FAST_setFFTAlgorithm(self, fft)


    def setBlockSize(self, blockSize):
        """
        Set the block size.

        Parameters
        ----------
        k : positive int
            Size of each block the sample is split into. Blocking saves memory
            while allowing multithreading; when available, we recommend using
            the number of available CPUs. The default value is :math:`1`.
        """
        return _uncertainty.FAST_setBlockSize(self, blockSize)


    def getBlockSize(self):
        """
        Get the block size.

        Returns
        -------
        k : positive int
            Size of each block the sample is split into. Blocking saves memory
            while allowing multithreading; when available, we recommend using
            the number of available CPUs. The default value is 1.
        """
        return _uncertainty.FAST_getBlockSize(self)


    def __init__(self, *args):
        this = _uncertainty.new_FAST(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    __swig_destroy__ = _uncertainty.delete_FAST
    __del__ = lambda self: None
FAST_swigregister = _uncertainty.FAST_swigregister
FAST_swigregister(FAST)

import openturns.transformation
import openturns.analytical
import openturns.simulation
import openturns.stattests
import openturns.model_process
# This file is compatible with both classic and new-style classes.