This file is indexed.

/usr/lib/python2.7/dist-packages/jmespath/lexer.py is in python-jmespath 0.4.1-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

import re
from json import loads

from jmespath.exceptions import LexerError


class Lexer(object):
    TOKENS = (
        r'(?P<number>-?\d+)|'
        r'(?P<unquoted_identifier>([a-zA-Z_][a-zA-Z_0-9]*))|'
        r'(?P<quoted_identifier>("(?:\\\\|\\"|[^"])*"))|'
        r'(?P<literal>(`(?:\\\\|\\`|[^`])*`))|'
        r'(?P<filter>\[\?)|'
        r'(?P<or>\|\|)|'
        r'(?P<pipe>\|)|'
        r'(?P<ne>!=)|'
        r'(?P<rbrace>\})|'
        r'(?P<eq>==)|'
        r'(?P<dot>\.)|'
        r'(?P<star>\*)|'
        r'(?P<gte>>=)|'
        r'(?P<lparen>\()|'
        r'(?P<lbrace>\{)|'
        r'(?P<lte><=)|'
        r'(?P<flatten>\[\])|'
        r'(?P<rbracket>\])|'
        r'(?P<lbracket>\[)|'
        r'(?P<rparen>\))|'
        r'(?P<comma>,)|'
        r'(?P<colon>:)|'
        r'(?P<lt><)|'
        r'(?P<expref>&)|'
        r'(?P<gt>>)|'
        r'(?P<current>@)|'
        r'(?P<skip>[ \t]+)'
    )
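    # Note (annotation, not in the upstream source): re alternation takes
    # the first branch that matches at a given position, so the
    # multi-character operators ('>=', '<=', '||', '[?', '[]') are listed
    # before their single-character prefixes ('>', '<', '|', '[').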
    def __init__(self):
        self.master_regex = re.compile(self.TOKENS)

    def tokenize(self, expression):
        previous_column = 0
        end = 0  # In case the loop body never runs (empty expression).
        for match in self.master_regex.finditer(expression):
            value = match.group()
            start = match.start()
            end = match.end()
            if match.lastgroup == 'skip':
                # Ignore whitespace.
                previous_column = end
                continue
            if start != previous_column:
                bad_value = expression[previous_column:start]
                # Try to give a good error message.
                if bad_value == '"':
                    raise LexerError(
                        lexer_position=previous_column,
                        lexer_value=value,
                        message='Starting quote is missing the ending quote',
                        expression=expression)
                raise LexerError(lexer_position=previous_column,
                                 lexer_value=value,
                                 message='Unknown character',
                                 expression=expression)
            previous_column = end
            token_type = match.lastgroup
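            # Dispatch to an optional per-type handler (_token_number,
            # _token_literal, ...) that converts the matched text into
            # its Python value; structural tokens keep their raw text.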
            handler = getattr(self, '_token_%s' % token_type.lower(), None)
            if handler is not None:
                value = handler(value, start, end)
            yield {'type': token_type, 'value': value,
                   'start': start, 'end': end}
        # At the end of the loop make sure we've consumed all the input.
        # If we haven't then we have unidentified characters.
        if end != len(expression):
            msg = "Unknown characters at the end of the expression"
            raise LexerError(lexer_position=end,
                             lexer_value='',
                             message=msg, expression=expression)
        else:
            yield {'type': 'eof', 'value': '',
                   'start': len(expression), 'end': len(expression)}

    def _token_number(self, value, start, end):
        return int(value)

    def _token_quoted_identifier(self, value, start, end):
        try:
            return loads(value)
        except ValueError as e:
            error_message = str(e).split(':')[0]
            raise LexerError(lexer_position=start,
                             lexer_value=value,
                             message=error_message)

    def _token_literal(self, value, start, end):
        actual_value = value[1:-1]
        actual_value = actual_value.replace('\\`', '`').lstrip()
        # First, if it looks like JSON then we parse it as
        # JSON and any JSON parsing errors propagate as lexing
        # errors.
        if self._looks_like_json(actual_value):
            try:
                return loads(actual_value)
            except ValueError:
                raise LexerError(lexer_position=start,
                                 lexer_value=value,
                                 message="Bad token %s" % value)
        else:
            potential_value = '"%s"' % actual_value
            try:
                # There's a shortcut syntax where string literals
                # don't have to be quoted.  This is only true if the
                # string doesn't start with chars that could start a valid
                # JSON value.
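                # For example, `foo` lexes the same as `"foo"` and
                # yields the string value 'foo'.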
                return loads(potential_value)
            except ValueError:
                raise LexerError(lexer_position=start,
                                 lexer_value=value,
                                 message="Bad token %s" % value)

    def _looks_like_json(self, value):
        # Figure out if the string "value" starts with something
        # that looks like json.
        if not value:
            return False
        elif value[0] in ['"', '{', '[']:
            return True
        elif value in ['true', 'false', 'null']:
            return True
        elif value[0] in ['-', '0', '1', '2', '3', '4', '5',
                          '6', '7', '8', '9']:
            # Then this is JSON, return True.
            try:
                loads(value)
                return True
            except ValueError:
                return False
        else:
            return False
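
For reference, a minimal sketch of how this lexer can be driven (the
expression and the printing loop below are illustrative, not part of the
package itself):

from jmespath.lexer import Lexer

# tokenize() is a generator; iterate it to pull tokens on demand.
lexer = Lexer()
for token in lexer.tokenize('foo[?bar >= `10`].baz'):
    print('%s %r' % (token['type'], token['value']))
# The literal `10` is parsed as JSON by _token_literal, so its value
# arrives as the integer 10; the stream always ends with an 'eof' token.

Note that values are post-processed per token type: numbers become ints
and literals become the parsed JSON value, while structural tokens such
as 'dot' and 'filter' keep their raw matched text.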