This file is indexed.

/usr/share/pyshared/mako/ext/babelplugin.py is in python-mako 0.7.0-1.1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

# ext/babelplugin.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""gettext message extraction via Babel: http://babel.edgewall.org/"""
from StringIO import StringIO

from babel.messages.extract import extract_python

from mako import lexer, parsetree

def extract(fileobj, keywords, comment_tags, options):
    """Extract messages from Mako templates.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: ``iterator``
    """
    encoding = options.get('input_encoding', options.get('encoding', None))

    template_node = lexer.Lexer(fileobj.read(),
                                input_encoding=encoding).parse()
    for extracted in extract_nodes(template_node.get_children(),
                                   keywords, comment_tags, options):
        yield extracted

def extract_nodes(nodes, keywords, comment_tags, options):
    """Extract messages from Mako's lexer node objects

    :param nodes: an iterable of Mako parsetree.Node objects to extract from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: ``iterator``
    """
    translator_comments = []
    in_translator_comments = False

    for node in nodes:
        child_nodes = None
        if in_translator_comments and isinstance(node, parsetree.Text) and \
                not node.content.strip():
            # Ignore whitespace within translator comments
            continue

        if isinstance(node, parsetree.Comment):
            value = node.text.strip()
            if in_translator_comments:
                translator_comments.extend(_split_comment(node.lineno, value))
                continue
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    in_translator_comments = True
                    translator_comments.extend(_split_comment(node.lineno,
                                                              value))
            continue

        if isinstance(node, parsetree.DefTag):
            code = node.function_decl.code
            child_nodes = node.nodes
        elif isinstance(node, parsetree.BlockTag):
            code = node.body_decl.code
            child_nodes = node.nodes
        elif isinstance(node, parsetree.CallTag):
            code = node.code.code
            child_nodes = node.nodes
        elif isinstance(node, parsetree.PageTag):
            code = node.body_decl.code
        elif isinstance(node, parsetree.CallNamespaceTag):
            attribs = ', '.join(['%s=%s' % (key, val)
                                 for key, val in node.attributes.iteritems()])
            code = '{%s}' % attribs
            child_nodes = node.nodes
        elif isinstance(node, parsetree.ControlLine):
            if node.isend:
                translator_comments = []
                in_translator_comments = False
                continue
            code = node.text
        elif isinstance(node, parsetree.Code):
            # <% and <%! blocks would provide their own translator comments
            translator_comments = []
            in_translator_comments = False

            code = node.code.code
        elif isinstance(node, parsetree.Expression):
            code = node.code.code
        else:
            translator_comments = []
            in_translator_comments = False
            continue

        # Comments don't apply unless they immediately precede the message
        if translator_comments and \
                translator_comments[-1][0] < node.lineno - 1:
            translator_comments = []
        else:
            translator_comments = \
                [comment[1] for comment in translator_comments]

        if isinstance(code, unicode):
            code = code.encode('ascii', 'backslashreplace')
        code = StringIO(code)
        for lineno, funcname, messages, python_translator_comments \
                in extract_python(code, keywords, comment_tags, options):
            yield (node.lineno + (lineno - 1), funcname, messages,
                   translator_comments + python_translator_comments)

        translator_comments = []
        in_translator_comments = False

        if child_nodes:
            for extracted in extract_nodes(child_nodes, keywords, comment_tags,
                                           options):
                yield extracted


def _split_comment(lineno, comment):
    """Return the multiline comment at lineno split into a list of comment line
    numbers and the accompanying comment line"""
    return [(lineno + index, line) for index, line in
            enumerate(comment.splitlines())]
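
For reference, here is a minimal usage sketch (not part of the packaged file above) showing how the extract() function can be driven directly, assuming Mako 0.7 and Babel are installed under Python 2; the template text, keyword list and option values are made up for illustration.

# Minimal sketch: run the extractor on an in-memory Mako template.
# The template source, keywords and options are illustrative only.
from StringIO import StringIO

from mako.ext.babelplugin import extract

template_source = """\
## TRANSLATORS: greeting shown on the landing page
${_(u"Hello, world!")}
"""

keywords = ['_', 'gettext', 'ngettext']   # names treated as translation calls
comment_tags = ['TRANSLATORS:']           # comment prefixes to carry along
options = {'input_encoding': 'utf-8'}

for lineno, funcname, message, comments in extract(StringIO(template_source),
                                                   keywords, comment_tags,
                                                   options):
    # Each result is (line number, translation function, message, comments).
    print lineno, funcname, message, comments

When extraction is run through Babel's command-line front end instead, this module is typically picked up via Mako's babel.extractors entry point (mako = mako.ext.babelplugin:extract) together with a mapping-file rule such as [mako: templates/**.html], optionally passing input_encoding as an option.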