# /usr/share/perl5/CQL/Lexer.pm from libcql-parser-perl 1.10-1.
# File owned by root:root, mode 0644.
package CQL::Lexer;
use strict;
use warnings;
use Carp qw( croak );
use String::Tokenizer;
use CQL::Token;
=head1 NAME
CQL::Lexer - a lexical analyzer for CQL
=head1 SYNOPSIS
my $lexer = CQL::Lexer->new();
$lexer->tokenize( 'foo and bar' );
my @tokens = $lexer->getTokens();
=head1 DESCRIPTION
CQL::Lexer is a lexical analyzer for a string of CQL. Once you've
got a CQL::Lexer object you can tokenize a CQL string into CQL::Token
objects. Ordinarily you'll never want to do this yourself since
CQL::Parser calls CQL::Lexer for you.
CQL::Lexer uses Stevan Little's lovely String::Tokenizer in the background,
and does a bit of analysis afterwards to handle some peculiarities of
CQL: double quoted strings, <, <=, etc.
=head1 METHODS
=head2 new()
The constructor.
=cut
sub new {
    my $class = shift;
    ## fresh lexer state: a String::Tokenizer to do the raw split,
    ## an empty token list, and the iterator cursor at the start
    my %state = (
        tokenizer => String::Tokenizer->new(),
        tokens    => [],
        position  => 0,
    );
    ## two-arg bless; works for both Class->new() and $obj->new()
    return bless {%state}, ref($class) || $class;
}
=head2 tokenize()
Pass in a string of CQL to tokenize. This initializes the lexer with
data so that you can retrieve tokens.
=cut
sub tokenize {
    my ( $self, $string ) = @_;
    ## rewind the iterator before lexing a new string
    $self->reset();
    debug( "tokenizing: $string" );
    ## String::Tokenizer does the raw split on the CQL special
    ## characters; whitespace is retained so _analyze can see
    ## word boundaries
    $self->{tokenizer}->tokenize( $string, '\/<>=()"',
        String::Tokenizer->RETAIN_WHITESPACE );
    debug( "lexical analysis on tokens" );
    ## post-process the raw tokens: quoted strings, <=, >=, <>, etc.
    my @analyzed = _analyze( $self->{tokenizer} );
    $self->{tokens} = \@analyzed;
}
=head2 getTokens()
Returns a list of all the tokens.
=cut
sub getTokens {
    my $self = shift;
    ## return a copy of the token list, not the internal arrayref
    my $stored = $self->{tokens};
    return @$stored;
}
=head2 token()
Returns the current token.
=cut
sub token {
    my $self = shift;
    ## the token under the iterator cursor (undef when out of range)
    my ( $list, $pos ) = @{$self}{qw(tokens position)};
    return $list->[$pos];
}
=head2 nextToken()
Returns the next token, or an empty CQL::Token if there are no more
tokens to retrieve from the lexer.
=cut
sub nextToken {
    my $self = shift;
    my $count = scalar @{ $self->{tokens} };
    ## within range: hand back the current token and advance the cursor
    if ( $self->{position} < $count ) {
        my $current = $self->{tokens}[ $self->{position} ];
        $self->{position}++;
        return $current;
    }
    ## exhausted: an empty token rather than undef
    return CQL::Token->new( '' );
}
=head2 prevToken()
Returns the previous token, or an empty CQL::Token if there are no
tokens prior to the current token.
=cut
sub prevToken {
    my $self = shift;
    ## not at the start: step the cursor back and return that token
    if ( $self->{position} ) {
        $self->{position}--;
        return $self->{tokens}[ $self->{position} ];
    }
    ## already at the beginning: an empty token rather than undef
    return CQL::Token->new( '' );
}
=head2 reset()
Resets the iterator to start reading tokens from the beginning.
=cut
sub reset {
    ## rewind the iterator cursor to the first token
    my $self = shift;
    $self->{position} = 0;
}
## Private sub used by _analyze for collecting a backslash escaped string terminated by "
sub _getString {
    my $iterator = shift;
    ## the caller already consumed the opening quote
    my $collected = '"';
    my $escaped = 0;
    ## consume tokens until an unescaped " terminates the string
    while ( $iterator->hasNextToken() ) {
        my $piece = $iterator->nextToken();
        $collected .= $piece;
        if ($escaped) {
            ## this token was backslash-escaped; clear the flag
            $escaped = 0;
        }
        elsif ( $piece eq '"' ) {
            ## unescaped closing quote: the string is complete
            return $collected;
        }
        elsif ( $piece eq "\\" ) {
            ## the next token is escaped
            $escaped = 1;
        }
    }
    ## input ran out before the closing quote appeared
    croak( 'unterminated string ' . $collected );
}
## Private sub used by _analyze to process \ outside double quotes.
## Because we tokenized on \ any \ outside double quotes (inside is handled by _getString)
## might need to be concatenated with a previous and or next CQL_WORD to form one CQL_WORD token
sub _concatBackslash {
## Takes a reference to the list of CQL::Token objects built by _analyze
## and, in place, merges each bare backslash token with the adjacent
## CQL_WORD token(s) so they form a single CQL_WORD.
my $tokensRef = shift;
my $i = 0;
while ($i < @$tokensRef) {
my $token = $$tokensRef[$i];
if ($token->getString() eq "\\") {
## start the merged word from the backslash itself
my $s = "\\";
my $replace = 0;
if ($i > 0) {
my $prevToken = $$tokensRef[$i - 1];
## only merge with the previous word when it was not marked
## "terminated" (by whitespace or a quoted string) in _analyze
if (($prevToken->getType() == CQL_WORD) and !$prevToken->{terminated}) {
# concatenate and delete the previous CQL_WORD token
$s = $prevToken->getString() . $s;
$i--;
splice @$tokensRef, $i, 1;
$replace = 1;
}
}
## likewise merge with the following word when the backslash token
## itself was not terminated by whitespace
if (!$token->{terminated} and ($i < $#$tokensRef)) {
my $nextToken = $$tokensRef[$i + 1];
if ($nextToken->getType() == CQL_WORD) {
# concatenate and delete the next CQL_WORD token
$s .= $nextToken->getString();
splice @$tokensRef, $i + 1, 1;
$replace = 1;
}
}
## swap the bare backslash token for the merged CQL_WORD token
if ($replace) {
$$tokensRef[$i] = CQL::Token->new($s);
}
}
$i++;
}
}
## Private sub: walk the raw String::Tokenizer output and build the
## final list of CQL::Token objects, combining two-character operators
## (<=, >=, <>), collecting quoted strings, and dropping whitespace
## while recording word boundaries for backslash handling.
sub _analyze {
    my $tokenizer = shift;
    my $iterator = $tokenizer->iterator();
    my @tokens;
    while ( defined (my $token = $iterator->nextToken()) ) {
        ## lookAheadToken() returns undef at the end of the stream;
        ## normalize to '' so the eq comparisons below never warn
        ## about an uninitialized value
        my $ahead = $iterator->lookAheadToken();
        $ahead = '' unless defined $ahead;
        ## <=
        if ( $token eq '<' and $ahead eq '=' ) {
            push( @tokens, CQL::Token->new( '<=' ) );
            $iterator->nextToken();
        }
        ## <>
        elsif ( $token eq '<' and $ahead eq '>' ) {
            push( @tokens, CQL::Token->new( '<>') );
            $iterator->nextToken();
        }
        ## >=
        elsif ( $token eq '>' and $ahead eq '=' ) {
            push( @tokens, CQL::Token->new( '>=' ) );
            $iterator->nextToken();
        }
        ## "quoted strings"
        elsif ( $token eq '"' ) {
            my $cqlToken = CQL::Token->new( _getString($iterator) );
            ## Mark this and the previous token as terminated to prevent
            ## concatenation with a backslash in _concatBackslash()
            $cqlToken->{terminated} = 1;
            if (@tokens) { $tokens[$#tokens]->{terminated} = 1; }
            push( @tokens, $cqlToken );
        }
        ## if it's just whitespace we can zap it (anchored so only
        ## pure-whitespace tokens match, not tokens containing spaces)
        elsif ( $token =~ /^\s+$/ ) {
            ## Mark the previous token as terminated to prevent
            ## concatenation with a backslash
            if (@tokens) {
                $tokens[$#tokens]->{terminated} = 1;
            }
        }
        ## otherwise it's fine the way it is
        else {
            push( @tokens, CQL::Token->new($token) );
        }
    } # while
    ## Concatenate \ outside double quotes with a previous and or next
    ## CQL_WORD to form one CQL_WORD token
    _concatBackslash(\@tokens);
    return @tokens;
}
sub debug {
    ## no-op unless the caller has enabled $CQL::DEBUG
    return unless $CQL::DEBUG;
    my $message = shift;
    print STDERR "CQL::Lexer: $message\n";
}
1;