# -*- coding: utf-8 -*-
# Source: stetl/inputs/fileinput.py from Debian package python-stetl 1.1+ds-2.
#
# Input classes for ETL, Files.
#
# Author: Just van den Broecke
#
from stetl.component import Config
from stetl.input import Input
from stetl.util import Util, etree
from stetl.utils.apachelog import formats, parser
from stetl.packet import FORMAT
import csv
import re
import fnmatch
log = Util.get_log('fileinput')
class FileInput(Input):
    """
    Abstract base class for specific FileInputs, use derived classes.
    """

    # Start attribute config meta
    # Applying Decorator pattern with the Config class to provide
    # read-only config values from the configured properties.
    @Config(ptype=str, default=None, required=False)
    def file_path(self):
        """
        Path to file or files or URLs: can be a dir or files or URLs
        or even multiple, comma separated. For URLs only JSON is supported now.
        """
        pass

    @Config(ptype=str, default='*.[gxGX][mM][lL]', required=False)
    def filename_pattern(self):
        """
        Filename pattern according to Python ``glob.glob`` for example:
        '\\*.[gxGX][mM][lL]'
        """
        pass

    @Config(ptype=bool, default=False, required=False)
    def depth_search(self):
        """
        Should we recurse into sub-directories to find files?
        """
        pass

    # End attribute config meta

    def __init__(self, configdict, section, produces):
        Input.__init__(self, configdict, section, produces)

        # Build the ordered list of input files once, up-front, from the config.
        self.file_list = Util.make_file_list(self.file_path, None, self.filename_pattern, self.depth_search)
        log.info("file_list=%s" % str(self.file_list))
        if len(self.file_list) == 0:
            raise Exception('File list is empty!!')

        self.cur_file_path = None
        self.file_list_done = []

    def read(self, packet):
        """
        Read/parse the next file whole into packet.data (one file per call).
        """
        # Nothing left once every file has been consumed.
        if len(self.file_list) == 0:
            return packet

        next_file = self.file_list.pop(0)
        log.info("Read/parse for start for file=%s...." % next_file)
        packet.data = self.read_file(next_file)
        log.info("Read/parse ok for file=%s" % next_file)

        # Each file is consumed in one go, so the document always ends here.
        packet.set_end_of_doc()

        if len(self.file_list) == 0:
            log.info("all files done")
            packet.set_end_of_stream()

        self.file_list_done.append(next_file)
        return packet

    def read_file(self, file_path):
        """
        Override in subclass.
        """
        pass
class StringFileInput(FileInput):
    """
    Reads and produces file as String.

    produces=FORMAT.string
    """

    # Start attribute config meta
    @Config(ptype=str, default=None, required=False)
    def format_args(self):
        """
        Formatting of content according to Python String.format()
        Input file should have substitutable values like {schema} {foo}
        format_args should be of the form ``format_args = schema:test foo:bar``
        """
        pass

    # End attribute config meta

    def __init__(self, configdict, section):
        FileInput.__init__(self, configdict, section, produces=FORMAT.string)
        self.file = None

        # When format_args is configured, parse it once into a dict so each
        # file's content can later be substituted via str.format().
        # Conversion trick: http://stackoverflow.com/a/1248990
        if self.format_args:
            self.format_args = Util.string_to_dict(self.format_args, ':')

    def read_file(self, file_path):
        """
        Overridden from base class: return the whole file as one string,
        optionally substituted via str.format() with format_args.
        """
        with open(file_path, 'r') as f:
            content = f.read()

        # Optional: string substitution based on Python String.format()
        # But you can also use StringSubstitutionFilter from filters.
        if self.format_args:
            content = content.format(**self.format_args)

        return content
class XmlFileInput(FileInput):
    """
    Parses XML files into etree docs (do not use for large files!).

    produces=FORMAT.etree_doc
    """

    # Constructor
    def __init__(self, configdict, section):
        FileInput.__init__(self, configdict, section, produces=FORMAT.etree_doc)

    def read_file(self, file_path):
        """
        Overridden from base class: one-time read/parse of the whole file into
        an etree document. Returns None when parsing fails (caller gets an
        empty packet for this file).
        """
        data = None
        try:
            data = etree.parse(file_path)
        except Exception as e:
            # A parse failure is a real problem: report at error level.
            # (Was log.info, which made broken input files easy to miss.)
            log.error('file read and parsed NOT OK : %s, err=%s' % (file_path, str(e)))
        return data
class XmlElementStreamerFileInput(FileInput):
    """
    Extracts XML elements from a file, outputs each feature element in Packet.
    Parsing is streaming (no internal DOM buildup) so any file size can be handled.
    Use this class for your big GML files!

    produces=FORMAT.etree_element
    """

    # Start attribute config meta
    @Config(ptype=list, default=None, required=True)
    def element_tags(self):
        """
        Comma-separated string of XML (feature) element tag names of the elements that should be extracted
        and added to the output element stream.
        """
        pass

    @Config(ptype=bool, default=False, required=False)
    def strip_namespaces(self):
        """
        should namespaces be removed from the input document and thus not be present in the output element stream?
        """
        pass

    # End attribute config meta

    # Constructor
    def __init__(self, configdict, section):
        FileInput.__init__(self, configdict, section, produces=FORMAT.etree_element)
        self.file_list_done = []
        self.context = None    # iterparse context for the file currently being streamed
        self.root = None       # root element of current doc; matched elements are pruned from it
        self.elem_count = 0    # number of matched elements emitted for the current file
        log.info("Element tags to be matched: %s" % self.element_tags)

    def read(self, packet):
        """
        Produce at most one matched element per call. Opens the next file when
        no parse is in progress; sets end-of-doc when the current file is
        exhausted and end-of-stream when all files are done. packet.data stays
        None for events whose tag is not in element_tags.
        """
        event = None
        packet.data = None

        if self.context is None:
            if not len(self.file_list):
                # No more files left, all done
                log.info("No more files left")
                return packet

            # Files available: pop next file
            self.cur_file_path = self.file_list.pop(0)
            fd = open(self.cur_file_path)
            self.elem_count = 0
            log.info("file opened : %s" % self.cur_file_path)
            self.context = etree.iterparse(fd, events=("start", "end"))
            self.context = iter(self.context)
            # First event delivers the document root; kept for pruning below.
            event, self.root = self.context.next()

        try:
            event, elem = self.context.next()
        except (etree.XMLSyntaxError, StopIteration):
            # workaround for etree.XMLSyntaxError https://bugs.launchpad.net/lxml/+bug/1185701
            self.context = None

        if self.context is None:
            # Always end of doc
            packet.set_end_of_doc()
            log.info("End of doc: %s elem_count=%d" % (self.cur_file_path, self.elem_count))

            # Maybe end of stream (all docs done)
            if not len(self.file_list):
                # No more files left: end of stream
                packet.set_end_of_stream()
                log.info("End of stream")

            return packet

        # Filter out Namespace from the tag
        # this is the easiest way to go for now
        tag = elem.tag.split('}')
        if len(tag) == 2:
            # Namespaced tag: 2nd is tag
            tag = tag[1]
        else:
            # Non-namespaced tag: first
            tag = tag[0]

        if tag in self.element_tags:
            if event == "start":
                # TODO check if deepcopy is the right thing to do here.
                # packet.data = elem
                pass
                # self.root.remove(elem)
            elif event == "end":
                # Delete the element from the tree
                # self.root.clear()
                packet.data = elem
                self.elem_count += 1
                # Prune emitted element from the root to keep memory flat.
                self.root.remove(elem)

                if self.strip_namespaces:
                    packet.data = Util.stripNamespaces(elem).getroot()

        return packet
class LineStreamerFileInput(FileInput):
    """
    Reads text-files, producing a stream of lines, one line per Packet.
    NB assumed is that lines in the file have newlines !!
    """

    def __init__(self, configdict, section, produces=FORMAT.line_stream):
        FileInput.__init__(self, configdict, section, produces)
        self.file_list_done = []
        self.file = None

    def read(self, packet):
        """
        Emit one (decoded, processed) line per call; advances to the next file
        on EOF and sets end-of-stream when the last file is fully read.
        """
        # No open file and nothing queued: the whole stream is finished.
        if self.file is None and not len(self.file_list):
            packet.set_end_of_stream()
            log.info("EOF file list")
            return packet

        # Open the next file when none is currently being read.
        if self.file is None:
            self.cur_file_path = self.file_list.pop(0)
            self.file = open(self.cur_file_path, 'r')
            log.info("file opened : %s" % self.cur_file_path)

        if packet.is_end_of_stream():
            return packet

        line = self.file.readline()

        if line:
            # Regular line: decode and hand to the (overridable) line processor.
            packet.data = self.process_line(line.decode('utf-8'))
            return packet

        # EOF of the current file reached.
        packet.data = None
        packet.set_end_of_doc()
        log.info("EOF file")
        if self.cur_file_path is not None:
            self.file_list_done.append(self.cur_file_path)
            self.cur_file_path = None
            if not len(self.file_list):
                # No more files left: end of stream reached
                packet.set_end_of_stream()
                log.info("EOF file list")

        self.file = None
        return packet

    def process_line(self, line):
        """
        Override in subclass.
        """
        return line
class XmlLineStreamerFileInput(LineStreamerFileInput):
    """
    DEPRECATED Streams lines from an XML file(s)
    NB assumed is that lines in the file have newlines !!
    DEPRECATED better is to use XmlElementStreamerFileInput for GML features.

    produces=FORMAT.xml_line_stream
    """

    # Constructor: identical to the base class apart from the produced format.
    def __init__(self, configdict, section):
        LineStreamerFileInput.__init__(self, configdict, section, produces=FORMAT.xml_line_stream)
class CsvFileInput(FileInput):
    """
    Parse CSV file into stream of records (dict structures) or a one-time record array.
    NB raw version: CSV needs to have first line with fieldnames.

    produces=FORMAT.record or FORMAT.record_array
    """

    @Config(ptype=str, default=',', required=False)
    def delimiter(self):
        """
        A one-character string used to separate fields. It defaults to ','.
        """
        pass

    @Config(ptype=str, default='"', required=False)
    def quote_char(self):
        """
        A one-character string used to quote fields containing special characters, such as the delimiter or quotechar,
        or which contain new-line characters. It defaults to '"'.
        """
        pass

    # Constructor
    def __init__(self, configdict, section):
        FileInput.__init__(self, configdict, section, produces=[FORMAT.record_array, FORMAT.record])
        self.file = None

    def init(self):
        # Init CSV reader
        log.info('Open CSV file: %s', self.file_path)
        self.file = open(self.file_path)
        self.csv_reader = csv.DictReader(self.file, delimiter=self.delimiter, quotechar=self.quote_char)
        if self._output_format == FORMAT.record_array:
            # Accumulate all records and emit them as a single array at EOF.
            self.arr = list()

    def read(self, packet):
        """
        In record mode: emit one CSV row (dict) per call.
        In record_array mode: read all rows in one call and emit the full list.
        Sets end-of-stream and closes the file when the CSV is exhausted.
        """
        try:
            # Builtin next() instead of reader.next(): works on Python 2.6+ and 3.
            packet.data = next(self.csv_reader)

            if self._output_format == FORMAT.record_array:
                while True:
                    self.arr.append(packet.data)
                    packet.data = next(self.csv_reader)
                    log.info("CSV row nr %d read: %s" % (self.csv_reader.line_num - 1, packet.data))

        except StopIteration:
            # End of CSV input. Was a broad `except Exception`, which silently
            # swallowed real I/O and CSV parse errors; those now propagate.
            if self._output_format == FORMAT.record_array:
                packet.data = self.arr
            packet.set_end_of_stream()
            self.file.close()

        return packet
class JsonFileInput(FileInput):
    """
    Parse JSON file from file system or URL into hierarchical data struct.
    The struct format may also be a GeoJSON structure. In that case the
    output_format needs to be explicitly set to geojson_collection in the component
    config.

    produces=FORMAT.struct or FORMAT.geojson_collection
    """

    # Constructor
    def __init__(self, configdict, section):
        FileInput.__init__(self, configdict, section, produces=[FORMAT.struct, FORMAT.geojson_collection])

    def read_file(self, file_path):
        """
        Overridden from base class: one-time read/parse of JSON from a local
        file or (when file_path starts with 'http') a URL.
        Raises (after logging) when the source cannot be read or parsed.
        """
        try:
            import json

            # may read/parse JSON from file or URL
            if file_path.startswith('http'):
                import urllib2

                fp = urllib2.urlopen(file_path)
                try:
                    file_data = json.loads(fp.read())
                finally:
                    # Was leaked: always release the HTTP connection.
                    fp.close()
            else:
                with open(file_path) as data_file:
                    file_data = json.load(data_file)
        except Exception as e:
            log.error('Cannot read JSON from %s, err= %s' % (file_path, str(e)))
            # Bare raise preserves the original traceback (`raise e` truncated it).
            raise

        return file_data
class ApacheLogFileInput(FileInput):
    """
    Parses Apache log files. Lines are converted into records based on the log format.
    Log format should follow Apache Log Format. See ApacheLogParser for details.

    produces=FORMAT.record
    """

    # NB: the '%h' key is fixed here - it was "'%h" (stray leading apostrophe),
    # so the remote-host field never received its readable 'host' alias.
    @Config(ptype=dict, default={'%l': 'logname', '%>s': 'status', '%D': 'deltat',
                                 '%{User-agent}i': 'agent', '%b': 'bytes', '%{Referer}i': 'referer',
                                 '%u': 'user', '%t': 'time', '%h': 'host', '%r': 'request'}, required=False)
    def key_map(self):
        """
        Map of cryptic %-field names to readable keys in record.
        """
        pass

    @Config(ptype=str, default=formats['extended'], required=False)
    def log_format(self):
        """
        Log format according to Apache CLF
        """
        pass

    # Constructor
    def __init__(self, configdict, section):
        FileInput.__init__(self, configdict, section, produces=FORMAT.record)
        self.file_list_done = []
        self.file = None
        self.parser = parser(self.log_format, self.key_map, options={'methods': ['GET', 'POST'],
                                                                     'use_native_types': True,
                                                                     'request_path_only': True,
                                                                     'gen_key': True})

    def read(self, packet):
        """
        Emit one parsed log line (record dict) per call; advances to the next
        file on EOF and sets end-of-stream when all files are done.
        """
        # No more files left and done with current file ?
        if not len(self.file_list) and self.file is None:
            packet.set_end_of_stream()
            log.info("EOF file list")
            return packet

        # Done with current file or first file ?
        if self.file is None:
            self.cur_file_path = self.file_list.pop(0)
            self.file = open(self.cur_file_path, 'r')
            log.info("file opened : %s" % self.cur_file_path)

        if packet.is_end_of_stream():
            return packet

        # Assume valid line
        line = self.file.readline()

        # EOF reached ?
        if not line or line == '':
            packet.data = None

            packet.set_end_of_doc()
            log.info("EOF file")
            if self.cur_file_path is not None:
                self.file_list_done.append(self.cur_file_path)
                self.cur_file_path = None
                if not len(self.file_list):
                    # No more files left: end of stream reached
                    packet.set_end_of_stream()
                    log.info("EOF file list")

            self.file = None
            return packet

        # Parse logfile line into record (dict)
        packet.data = self.parser.parse(line)
        return packet
class ZipFileInput(FileInput):
    """
    Parse ZIP file from file system or URL into a stream of records containing zipfile-path and file names.

    produces=FORMAT.record
    """

    @Config(ptype=str, default='*', required=False)
    def name_filter(self):
        """
        Regular "glob.glob" expression for filtering out filenames from the ZIP archive.
        """
        pass

    def __init__(self, configdict, section):
        FileInput.__init__(self, configdict, section, produces=FORMAT.record)
        self.file_content = None

        # Pre-compile the glob-style name filter into a regex object, used
        # below to select entries from each zip archive. Default '*' matches all.
        self.fname_matcher = re.compile(fnmatch.translate(self.name_filter))

    def read(self, packet):
        """
        Emit one record per call: {'file_path': <zip path>, 'name': <entry name>}.
        """
        # Neither pending archive entries nor remaining zip files: all done.
        if not self.file_content and not len(self.file_list):
            packet.set_end_of_stream()
            log.info("EOF file list, all files done")
            return packet

        if self.file_content is None:
            # Open the next archive and collect its entry names, keeping only
            # those matching the pre-compiled name filter (single pass).
            self.cur_file_path = self.file_list.pop(0)
            import zipfile
            zf = zipfile.ZipFile(self.cur_file_path, 'r')
            self.file_content = [
                {'file_path': self.cur_file_path, 'name': entry}
                for entry in zf.namelist()
                if self.fname_matcher.match(entry)
            ]
            log.info("zip file read : %s filecount=%d" % (self.cur_file_path, len(self.file_content)))

        if len(self.file_content):
            packet.data = self.file_content.pop(0)
            log.info("Pop file record: %s" % str(packet.data))

        if not len(self.file_content):
            # Current archive exhausted: trigger opening the next one.
            self.file_content = None

        return packet
class GlobFileInput(FileInput):
    """
    Returns file names based on the ``glob.glob`` pattern given as
    filename_pattern (inherited from FileInput).

    produces=FORMAT.string or FORMAT.line_stream
    """

    def __init__(self, configdict, section, produces=None):
        # None-sentinel instead of a mutable (list) default argument.
        # Backward compatible: callers passing an explicit produces are unaffected.
        if produces is None:
            produces = [FORMAT.string, FORMAT.line_stream]
        FileInput.__init__(self, configdict, section, produces)

    def read(self, packet):
        """
        Emit one file path (string) per call; sets end-of-doc after each file
        and end-of-stream after the last.
        """
        if not len(self.file_list):
            return packet

        file_path = self.file_list.pop(0)
        # TODO: os.path.join?
        packet.data = file_path

        # One-time read: we're all done
        packet.set_end_of_doc()

        if not len(self.file_list):
            log.info("all files done")
            packet.set_end_of_stream()

        self.file_list_done.append(file_path)
        return packet
|