This file is indexed.

/usr/share/pyshared/_pytest/skipping.py is in python-pytest 2.2.4-2.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

""" support for skip/xfail functions and markers. """

import py, pytest
import sys

def pytest_addoption(parser):
    group = parser.getgroup("general")
    group.addoption('--runxfail',
           action="store_true", dest="runxfail", default=False,
           help="run tests even if they are marked xfail")

def pytest_configure(config):
    config.addinivalue_line("markers",
        "skipif(*conditions): skip the given test function if evaluation "
        "of all conditions has a True value.  Evaluation happens within the "
        "module global context. Example: skipif('sys.platform == \"win32\"') "
        "skips the test if we are on the win32 platform. "
    )
    config.addinivalue_line("markers",
        "xfail(*conditions, reason=None, run=True): mark the the test function "
        "as an expected failure. Optionally specify a reason and run=False "
        "if you don't even want to execute the test function. Any positional "
        "condition strings will be evaluated (like with skipif) and if one is "
        "False the marker will not be applied."
    )
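
# Example usage of the markers registered above (a minimal sketch of a test
# module; the condition string is evaluated in the test module's globals,
# plus 'os', 'sys' and 'config' provided by this plugin):
#
#     import pytest
#
#     @pytest.mark.skipif("sys.platform == 'win32'")
#     def test_posix_only():
#         assert True
#
#     @pytest.mark.xfail(reason="known bug, see issue tracker")
#     def test_known_failure():
#         assert 1 == 2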

def pytest_namespace():
    return dict(xfail=xfail)

class XFailed(pytest.fail.Exception):
    """ raised from an explicit call to py.test.xfail() """

def xfail(reason=""):
    """ xfail an executing test or setup functions with the given reason."""
    __tracebackhide__ = True
    raise XFailed(reason)
xfail.Exception = XFailed
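
# Example (a minimal sketch): pytest.xfail() can also be called imperatively
# from inside a test or setup function; it aborts execution and reports the
# item as xfailed. 'somelib' below is a hypothetical optional dependency.
#
#     def test_optional_feature():
#         try:
#             import somelib
#         except ImportError:
#             pytest.xfail("somelib is not installed")
#         assert somelib.do_something() is not None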

class MarkEvaluator:
    def __init__(self, item, name):
        self.item = item
        self.name = name

    @property
    def holder(self):
        return self.item.keywords.get(self.name, None)
    def __bool__(self):
        return bool(self.holder)
    __nonzero__ = __bool__

    def wasvalid(self):
        return not hasattr(self, 'exc')

    def istrue(self):
        try:
            return self._istrue()
        except KeyboardInterrupt:
            raise
        except:
            self.exc = sys.exc_info()
            if isinstance(self.exc[1], SyntaxError):
                msg = [" " * (self.exc[1].offset + 4) + "^",]
                msg.append("SyntaxError: invalid syntax")
            else:
                msg = py.std.traceback.format_exception_only(*self.exc[:2])
            pytest.fail("Error evaluating %r expression\n"
                        "    %s\n"
                        "%s"
                        %(self.name, self.expr, "\n".join(msg)),
                        pytrace=False)

    def _getglobals(self):
        d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config}
        func = self.item.obj
        try:
            d.update(func.__globals__)
        except AttributeError:
            d.update(func.func_globals)
        return d

    def _istrue(self):
        if self.holder:
            d = self._getglobals()
            if self.holder.args:
                self.result = False
                for expr in self.holder.args:
                    self.expr = expr
                    if isinstance(expr, str):
                        result = cached_eval(self.item.config, expr, d)
                    else:
                        pytest.fail("expression is not a string")
                    if result:
                        self.result = True
                        self.expr = expr
                        break
            else:
                self.result = True
        return getattr(self, 'result', False)

    def get(self, attr, default=None):
        return self.holder.kwargs.get(attr, default)

    def getexplanation(self):
        expl = self.get('reason', None)
        if not expl:
            if not hasattr(self, 'expr'):
                return ""
            else:
                return "condition: " + str(self.expr)
        return expl


def pytest_runtest_setup(item):
    if not isinstance(item, pytest.Function):
        return
    evalskip = MarkEvaluator(item, 'skipif')
    if evalskip.istrue():
        py.test.skip(evalskip.getexplanation())
    item._evalxfail = MarkEvaluator(item, 'xfail')
    check_xfail_no_run(item)

def pytest_pyfunc_call(pyfuncitem):
    check_xfail_no_run(pyfuncitem)

def check_xfail_no_run(item):
    if not item.config.option.runxfail:
        evalxfail = item._evalxfail
        if evalxfail.istrue():
            if not evalxfail.get('run', True):
                py.test.xfail("[NOTRUN] " + evalxfail.getexplanation())

def pytest_runtest_makereport(__multicall__, item, call):
    if not isinstance(item, pytest.Function):
        return
    # unittest special case, see setting of _unexpectedsuccess
    if hasattr(item, '_unexpectedsuccess'):
        rep = __multicall__.execute()
        if rep.when == "call":
            # we need to translate into how py.test encodes xpass
            rep.keywords['xfail'] = "reason: " + repr(item._unexpectedsuccess)
            rep.outcome = "failed"
        return rep
    if not (call.excinfo and
        call.excinfo.errisinstance(py.test.xfail.Exception)):
        evalxfail = getattr(item, '_evalxfail', None)
        if not evalxfail:
            return
    if call.excinfo and call.excinfo.errisinstance(py.test.xfail.Exception):
        if not item.config.getvalue("runxfail"):
            rep = __multicall__.execute()
            rep.keywords['xfail'] = "reason: " + call.excinfo.value.msg
            rep.outcome = "skipped"
            return rep
    rep = __multicall__.execute()
    evalxfail = item._evalxfail
    if not item.config.option.runxfail:
        if evalxfail.wasvalid() and evalxfail.istrue():
            if call.excinfo:
                rep.outcome = "skipped"
                rep.keywords['xfail'] = evalxfail.getexplanation()
            elif call.when == "call":
                rep.outcome = "failed"
                rep.keywords['xfail'] = evalxfail.getexplanation()
            return rep
    if 'xfail' in rep.keywords:
        del rep.keywords['xfail']
    return rep

# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
    if 'xfail' in report.keywords:
        if report.skipped:
            return "xfailed", "x", "xfail"
        elif report.failed:
            return "xpassed", "X", "XPASS"

# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
    tr = terminalreporter
    if not tr.reportchars:
        #for name in "xfailed skipped failed xpassed":
        #    if not tr.stats.get(name, 0):
        #        tr.write_line("HINT: use '-r' option to see extra "
        #              "summary info about tests")
        #        break
        return

    lines = []
    for char in tr.reportchars:
        if char == "x":
            show_xfailed(terminalreporter, lines)
        elif char == "X":
            show_xpassed(terminalreporter, lines)
        elif char in "fF":
            show_simple(terminalreporter, lines, 'failed', "FAIL %s")
        elif char in "sS":
            show_skipped(terminalreporter, lines)
        elif char == "E":
            show_simple(terminalreporter, lines, 'error', "ERROR %s")
    if lines:
        tr._tw.sep("=", "short test summary info")
        for line in lines:
            tr._tw.line(line)

def show_simple(terminalreporter, lines, stat, format):
    tw = terminalreporter._tw
    failed = terminalreporter.stats.get(stat)
    if failed:
        for rep in failed:
            pos = rep.nodeid
            lines.append(format %(pos, ))

def show_xfailed(terminalreporter, lines):
    xfailed = terminalreporter.stats.get("xfailed")
    if xfailed:
        for rep in xfailed:
            pos = rep.nodeid
            reason = rep.keywords['xfail']
            lines.append("XFAIL %s" % (pos,))
            if reason:
                lines.append("  " + str(reason))

def show_xpassed(terminalreporter, lines):
    xpassed = terminalreporter.stats.get("xpassed")
    if xpassed:
        for rep in xpassed:
            pos = rep.nodeid
            reason = rep.keywords['xfail']
            lines.append("XPASS %s %s" %(pos, reason))

def cached_eval(config, expr, d):
    if not hasattr(config, '_evalcache'):
        config._evalcache = {}
    try:
        return config._evalcache[expr]
    except KeyError:
        #import sys
        #print >>sys.stderr, ("cache-miss: %r" % expr)
        exprcode = py.code.compile(expr, mode="eval")
        config._evalcache[expr] = x = eval(exprcode, d)
        return x
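
# Note: the cache is keyed on the expression string alone, so a condition
# such as skipif("sys.version_info >= (3,0)") is compiled and evaluated at
# most once per test run, even when it appears on many test functions; later
# lookups return the cached result regardless of the globals dict passed in.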


def folded_skips(skipped):
    d = {}
    for event in skipped:
        key = event.longrepr
        assert len(key) == 3, (event, key)
        d.setdefault(key, []).append(event)
    l = []
    for key, events in d.items():
        l.append((len(events),) + key)
    return l

def show_skipped(terminalreporter, lines):
    tr = terminalreporter
    skipped = tr.stats.get('skipped', [])
    if skipped:
        #if not tr.hasopt('skipped'):
        #    tr.write_line(
        #        "%d skipped tests, specify -rs for more info" %
        #        len(skipped))
        #    return
        fskips = folded_skips(skipped)
        if fskips:
            #tr.write_sep("_", "skipped test summary")
            for num, fspath, lineno, reason in fskips:
                if reason.startswith("Skipped: "):
                    reason = reason[9:]
                lines.append("SKIP [%d] %s:%d: %s" %
                    (num, fspath, lineno, reason))