This file is indexed.

/usr/share/pyshared/ldap/schema/tokenizer.py is in python-ldap 2.4.10-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

"""
ldap.schema.tokenizer - Low-level parsing functions for schema element strings

See http://www.python-ldap.org/ for details.

$Id: tokenizer.py,v 1.13 2009/04/29 18:13:55 stroeder Exp $
"""


def split_tokens(s,keywordDict):
  """
  Returns list of syntax elements with quotes and spaces
  stripped.
  """
  result = []
  result_append = result.append
  s_len = len(s)
  i = 0
  while i<s_len:
    start = i
    while i<s_len and s[i]!="'":
      if s[i]=="(" or s[i]==")":
        if i>start:
          result_append(s[start:i])
        result_append(s[i])
        i +=1 # Consume parentheses
        start = i
      elif s[i]==" " or s[i]=="$":
        if i>start:
          result_append(s[start:i])
        i +=1
        # Consume more space chars
        while i<s_len and s[i]==" ":
          i +=1
        start = i
      else:
        i +=1
    if i>start:
      result_append(s[start:i])
    i +=1
    if i>=s_len:
      break
    start = i
    while i<s_len and s[i]!="'":
      i +=1
    if i>=start:
      result_append(s[start:i])
    i +=1
  return result # split_tokens()
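
# Illustrative usage sketch -- not part of the original tokenizer.py, and
# assuming Python 2 to match the rest of this module. The attribute type
# definition string below is hypothetical:
#
#   >>> split_tokens("( 2.5.4.3 NAME 'cn' SUP name )", {})
#   ['(', '2.5.4.3', 'NAME', 'cn', 'SUP', 'name', ')']
#
# Quotes, surrounding spaces and '$' separators are stripped; parentheses are
# returned as tokens of their own. The keywordDict argument is accepted but
# not referenced in this revision of the function.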


def extract_tokens(l,known_tokens):
  """
  Returns dictionary of known tokens with all values
  """
  assert l[0].strip()=="(" and l[-1].strip()==")",ValueError(l)
  result = {}
  result_has_key = result.has_key
  result.update(known_tokens)
  i = 0
  l_len = len(l)
  while i<l_len:
    if result_has_key(l[i]):
      token = l[i]
      i += 1 # Consume token
      if i<l_len:
        if result_has_key(l[i]):
          # non-valued
          result[token] = (())
        elif l[i]=="(":
          # multi-valued
          i += 1 # Consume left parentheses
          start = i
          while i<l_len and l[i]!=")":
            i += 1
          result[token] = tuple(filter(lambda v:v!='$',l[start:i]))
          i += 1 # Consume right parentheses
        else:
          # single-valued
          result[token] = l[i],
          i += 1 # Consume single value
    else:
      i += 1 # Consume unrecognized item
  return result
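
As an illustrative sketch (not part of the packaged file), the two functions
are typically used together: split_tokens() flattens a schema element
definition into a token list, and extract_tokens() then collects the values
that follow each known keyword. The definition string and keyword defaults
below are hypothetical examples written for Python 2, to match the module.

from ldap.schema.tokenizer import split_tokens, extract_tokens

# Hypothetical attribute type definition to tokenize.
attrtype = "( 2.5.4.3 NAME 'cn' SUP name )"

# Keywords of interest, each mapped to an empty default value tuple.
known = {'NAME': (), 'DESC': (), 'SUP': ()}

tokens = split_tokens(attrtype, known)
# tokens == ['(', '2.5.4.3', 'NAME', 'cn', 'SUP', 'name', ')']

fields = extract_tokens(tokens, known)
# fields == {'NAME': ('cn',), 'SUP': ('name',), 'DESC': ()}
print fields['NAME'][0]   # prints: cn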