This file is indexed.

/usr/share/arm/util/log.py is in tor-arm 1.4.5.0-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

"""
Tracks application events, both directing them to attached listeners and
keeping a record of them. Limited space is provided for old events, which are
kept and trimmed on a per-runlevel basis (i.e., too many DEBUG events will only
cause entries of that runlevel to be dropped). All functions are thread safe.
"""

import os
import time
from sys import maxint
from threading import RLock

from util import enum

# Logging runlevels. These are *very* commonly used, so shorter aliases are
# included (they can be referenced as log.DEBUG, log.WARN, etc).
Runlevel = enum.Enum(("DEBUG", "DEBUG"), ("INFO", "INFO"), ("NOTICE", "NOTICE"),
                     ("WARN", "WARN"), ("ERR", "ERR"))
DEBUG, INFO, NOTICE, WARN, ERR = Runlevel.values()

# provides thread safety for logging operations
LOG_LOCK = RLock()

# chronologically ordered records of events for each runlevel, stored as tuples
# consisting of: (time, message)
_backlog = dict([(level, []) for level in Runlevel.values()])

# mapping of runlevels to the listeners interested in receiving events of that runlevel
_listeners = dict([(level, []) for level in Runlevel.values()])

CONFIG = {"cache.armLog.size": 1000,
          "cache.armLog.trimSize": 200}

DUMP_FILE = None

def loadConfig(config):
  config.update(CONFIG, {
    "cache.armLog.size": 10,
    "cache.armLog.trimSize": 5})
  
  CONFIG["cache.armLog.trimSize"] = min(CONFIG["cache.armLog.trimSize"], CONFIG["cache.armLog.size"] / 2)

def setDumpFile(logPath):
  """
  Persists all future logged events to the given path. This raises an IOError
  if the file can't be opened. If the file already exists then it's
  overwritten.
  
  Arguments:
    logPath - path to which logs are persisted
  """
  
  global DUMP_FILE
  
  # make sure that the parent directory exists
  baseDir = os.path.dirname(logPath)
  if not os.path.exists(baseDir): os.makedirs(baseDir)
  
  DUMP_FILE = open(logPath, "w")

def log(level, msg, eventTime = None):
  """
  Registers an event, directing it to interested listeners and preserving it in
  the backlog. If the level is None then this is a no-op.
  
  Arguments:
    level     - runlevel corresponding to the message severity
    msg       - string associated with the message
    eventTime - unix time at which the event occurred, current time if undefined
  """
  
  global DUMP_FILE
  if not level: return
  if eventTime == None: eventTime = time.time()
  
  LOG_LOCK.acquire()
  try:
    newEvent = (eventTime, msg)
    eventBacklog = _backlog[level]
    
    # inserts the new event into the backlog
    if not eventBacklog or eventTime >= eventBacklog[-1][0]:
      # newest event - append to end
      eventBacklog.append(newEvent)
    elif eventTime <= eventBacklog[0][0]:
      # oldest event - insert at start
      eventBacklog.insert(0, newEvent)
    else:
      # somewhere in the middle - start checking from the end
      for i in range(len(eventBacklog) - 1, -1, -1):
        if eventBacklog[i][0] <= eventTime:
          eventBacklog.insert(i + 1, newEvent)
          break
    
    # truncates backlog if too long
    toDelete = len(eventBacklog) - CONFIG["cache.armLog.size"]
    if toDelete >= 0: del eventBacklog[: toDelete + CONFIG["cache.armLog.trimSize"]]
    
    # persists the event if a debug file's been set
    if DUMP_FILE:
      try:
        entryTime = time.localtime(eventTime)
        timeLabel = "%i/%i/%i %02i:%02i:%02i" % (entryTime[1], entryTime[2], entryTime[0], entryTime[3], entryTime[4], entryTime[5])
        logEntry = "%s [%s] %s\n" % (timeLabel, level, msg)
        DUMP_FILE.write(logEntry)
        DUMP_FILE.flush()
      except IOError, exc:
        DUMP_FILE = None
        log(ERR, "Failed to write to the debug file - %s" % exc)
    
    # notifies listeners
    for callback in _listeners[level]:
      callback(level, msg, eventTime)
  finally:
    LOG_LOCK.release()

def addListener(level, callback):
  """
  Directs future events to the given callback function. Runlevels are passed to
  listeners as the corresponding strings ("DEBUG", "INFO", "NOTICE", etc), and
  times are given in POSIX (unix) time.
  
  Arguments:
    level    - event runlevel the listener should be notified of
    callback - functor that'll accept the events, expected to be of the form:
               myFunction(level, msg, time)
  """
  
  if not callback in _listeners[level]:
    _listeners[level].append(callback)

def addListeners(levels, callback, dumpBacklog = False):
  """
  Directs future events of multiple runlevels to the given callback function.
  
  Arguments:
    levels      - list of runlevel events the listener should be notified of
    callback    - functor that'll accept the events, expected to be of the
                  form: myFunction(level, msg, time)
    dumpBacklog - if true, any past events of the designated runlevels will be
                  provided to the listener before returning (in chronological
                  order)
  """
  
  LOG_LOCK.acquire()
  try:
    for level in levels: addListener(level, callback)
    
    if dumpBacklog:
      for level, msg, eventTime in _getEntries(levels):
        callback(level, msg, eventTime)
  finally:
    LOG_LOCK.release()

def removeListener(level, callback):
  """
  Stops the listener from being notified of further events. This returns true
  if a listener is removed, false otherwise.
  
  Arguments:
    level    - runlevel from which the listener is to be removed
    callback - functor to be removed
  """
  
  if callback in _listeners[level]:
    _listeners[level].remove(callback)
    return True
  else: return False

def _getEntries(levels):
  """
  Generator for providing past events belonging to the given runlevels (in
  chronological order). This should be used under the LOG_LOCK to prevent
  concurrent modifications.
  
  Arguments:
    levels - runlevels for which events are provided
  """
  
  # drops any runlevels that don't have any backlog entries
  toRemove = [level for level in levels if not _backlog[level]]
  for level in toRemove: levels.remove(level)
  
  # tracks where unprocessed entries start in the backlog
  backlogPtr = dict([(level, 0) for level in levels])
  
  while levels:
    earliestLevel, earliestMsg, earliestTime = None, "", maxint
    
    # finds the earliest unprocessed event
    for level in levels:
      entry = _backlog[level][backlogPtr[level]]
      
      if entry[0] < earliestTime:
        earliestLevel, earliestMsg, earliestTime = level, entry[1], entry[0]
    
    yield (earliestLevel, earliestMsg, earliestTime)
    
    # removes the runlevel if there aren't any more unprocessed entries
    backlogPtr[earliestLevel] += 1
    if len(_backlog[earliestLevel]) <= backlogPtr[earliestLevel]:
      levels.remove(earliestLevel)
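
For reference, below is a minimal sketch of how calling code might use this module, assuming arm's source directory is on the Python path. The listener function, dump path, and log message are illustrative examples rather than code taken from arm itself; they rely only on the functions defined above (setDumpFile, addListeners, and log) and the runlevel constants.

# example_listener.py - illustrative usage of util.log (not part of the package)

import time
from util import log

def printEvent(level, msg, eventTime):
  # listener matching the expected myFunction(level, msg, time) signature
  timeLabel = time.strftime("%H:%M:%S", time.localtime(eventTime))
  print "%s [%s] %s" % (timeLabel, level, msg)

# hypothetical path - persists every subsequent event to disk as well
log.setDumpFile("/tmp/arm_debug_log")

# receive NOTICE and above, replaying anything already in the backlog
log.addListeners([log.NOTICE, log.WARN, log.ERR], printEvent, dumpBacklog = True)

log.log(log.NOTICE, "example event from the illustrative listener")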