#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = ['Pool', 'ThreadPool']

#
# Imports
#

import threading
import queue
import itertools
import collections
import os
import time
import traceback

# If threading is available then ThreadPool should be provided.  Therefore
# we avoid top-level imports which are liable to fail on some systems.
from . import util
from . import get_context, TimeoutError

#
# Constants representing the state of a pool
#

RUN = 0
CLOSE = 1
TERMINATE = 2

#
# Miscellaneous
#

job_counter = itertools.count()

def mapstar(args):
    return list(map(*args))

def starmapstar(args):
    return list(itertools.starmap(args[0], args[1]))

#
# Hack to embed stringification of remote traceback in local traceback
#

class RemoteTraceback(Exception):
    def __init__(self, tb):
        self.tb = tb
    def __str__(self):
        return self.tb

class ExceptionWithTraceback:
    def __init__(self, exc, tb):
        tb = traceback.format_exception(type(exc), exc, tb)
        tb = ''.join(tb)
        self.exc = exc
        self.tb = '\n"""\n%s"""' % tb
    def __reduce__(self):
        return rebuild_exc, (self.exc, self.tb)

def rebuild_exc(exc, tb):
    exc.__cause__ = RemoteTraceback(tb)
    return exc
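
# A minimal sketch of the round trip above, assuming this module is imported
# as multiprocessing.pool: pickling an ExceptionWithTraceback invokes
# __reduce__, and unpickling calls rebuild_exc(), which re-attaches the
# formatted remote traceback as the __cause__ of the original exception.
# The helper below is hypothetical and is never called by the pool itself.
def _demo_remote_traceback_roundtrip():
    import pickle
    try:
        1 / 0
    except ZeroDivisionError as exc:
        wrapped = ExceptionWithTraceback(exc, exc.__traceback__)
    rebuilt = pickle.loads(pickle.dumps(wrapped))
    # rebuilt is the original ZeroDivisionError; str(rebuilt.__cause__) is
    # the worker-side traceback text.
    assert isinstance(rebuilt.__cause__, RemoteTraceback)
    return rebuilt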

#
# Code run by worker processes
#

class MaybeEncodingError(Exception):
    """Wraps possible unpickleable errors, so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        self.exc = repr(exc)
        self.value = repr(value)
        super(MaybeEncodingError, self).__init__(self.exc, self.value)

    def __str__(self):
        return "Error sending result: '%s'. Reason: '%s'" % (self.value,
                                                             self.exc)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)


def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
           wrap_exception=False):
    if (maxtasks is not None) and not (isinstance(maxtasks, int)
                                       and maxtasks >= 1):
        raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks))
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, OSError):
            util.debug('worker got EOFError or OSError -- exiting')
            break

        if task is None:
            util.debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            if wrap_exception and func is not _helper_reraises_exception:
                e = ExceptionWithTraceback(e, e.__traceback__)
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            wrapped = MaybeEncodingError(e, result[1])
            util.debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))

        task = job = result = func = args = kwds = None
        completed += 1
    util.debug('worker exiting after %d tasks' % completed)

def _helper_reraises_exception(ex):
    'Pickle-able helper function for use by _guarded_task_generation.'
    raise ex
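
# A minimal sketch of the task/result protocol handled by worker() above,
# with hypothetical values; nothing here is executed by the pool itself.
# Each task pulled from inqueue is a 5-tuple, and each result pushed to
# outqueue is a 3-tuple whose payload is (success_flag, value_or_exception).
def _demo_task_protocol():
    job, i = 0, 0
    task = (job, i, max, (3, 5), {})                 # (job, i, func, args, kwds)
    job, i, func, args, kwds = task
    result = (job, i, (True, func(*args, **kwds)))   # -> (0, 0, (True, 5))
    return result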

#
# Class representing a process pool
#

class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    _wrap_exception = True

    def Process(self, *args, **kwds):
        return self._ctx.Process(*args, **kwds)

    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None, context=None):
        self._ctx = context or get_context()
        self._setup_queues()
        self._taskqueue = queue.SimpleQueue()
        self._cache = {}
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            processes = os.cpu_count() or 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        self._pool = []
        self._repopulate_pool()

        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()


        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue,
                  self._pool, self._cache)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        self._terminate = util.Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )

    def _join_exited_workers(self):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime.  Returns True if any workers were cleaned up.
        """
        cleaned = False
        for i in reversed(range(len(self._pool))):
            worker = self._pool[i]
            if worker.exitcode is not None:
                # worker exited
                util.debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del self._pool[i]
        return cleaned

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            w = self.Process(target=worker,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild,
                                   self._wrap_exception)
                            )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            util.debug('added worker')

    def _maintain_pool(self):
        """Clean up any exited workers and start replacements for them.
        """
        if self._join_exited_workers():
            self._repopulate_pool()

    def _setup_queues(self):
        self._inqueue = self._ctx.SimpleQueue()
        self._outqueue = self._ctx.SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `func(*args, **kwds)`.
        Pool must be running.
        '''
        return self.apply_async(func, args, kwds).get()

    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        '''
        return self._map_async(func, iterable, mapstar, chunksize).get()

    def starmap(self, func, iterable, chunksize=None):
        '''
        Like the `map()` method, but the elements of the `iterable` are
        expected to be iterables as well and will be unpacked as arguments.
        Hence `func` and (a, b) becomes func(a, b).
        '''
        return self._map_async(func, iterable, starmapstar, chunksize).get()

    def starmap_async(self, func, iterable, chunksize=None, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `starmap()` method.
        '''
        return self._map_async(func, iterable, starmapstar, chunksize,
                               callback, error_callback)

    def _guarded_task_generation(self, result_job, func, iterable):
        '''Provides a generator of tasks for imap and imap_unordered with
        appropriate handling for iterables which throw exceptions during
        iteration.'''
        try:
            i = -1
            for i, x in enumerate(iterable):
                yield (result_job, i, func, (x,), {})
        except Exception as e:
            yield (result_job, i+1, _helper_reraises_exception, (e,), {})

    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapIterator(self._cache)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job, func, iterable),
                    result._set_length
                ))
            return result
        else:
            if chunksize < 1:
                raise ValueError(
                    "Chunksize must be 1+, not {0:n}".format(
                        chunksize))
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job,
                                                  mapstar,
                                                  task_batches),
                    result._set_length
                ))
            return (item for chunk in result for item in chunk)

    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like the `imap()` method but the ordering of results is arbitrary.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job, func, iterable),
                    result._set_length
                ))
            return result
        else:
            if chunksize < 1:
                raise ValueError(
                    "Chunksize must be 1+, not {0!r}".format(chunksize))
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job,
                                                  mapstar,
                                                  task_batches),
                    result._set_length
                ))
            return (item for chunk in result for item in chunk)

    def apply_async(self, func, args=(), kwds={}, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `apply()` method.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        result = ApplyResult(self._cache, callback, error_callback)
        self._taskqueue.put(([(result._job, 0, func, args, kwds)], None))
        return result

    def map_async(self, func, iterable, chunksize=None, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `map()` method.
        '''
        return self._map_async(func, iterable, mapstar, chunksize, callback,
            error_callback)

    def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
            error_callback=None):
        '''
        Helper function to implement map, starmap and their async counterparts.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)

        if chunksize is None:
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
        if len(iterable) == 0:
            chunksize = 0

        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback,
                           error_callback=error_callback)
        self._taskqueue.put(
            (
                self._guarded_task_generation(result._job,
                                              mapper,
                                              task_batches),
                None
            )
        )
        return result

    @staticmethod
    def _handle_workers(pool):
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        util.debug('worker handler exiting')

    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool, cache):
        thread = threading.current_thread()

        for taskseq, set_length in iter(taskqueue.get, None):
            task = None
            try:
                # iterating taskseq cannot fail
                for task in taskseq:
                    if thread._state:
                        util.debug('task handler found thread._state != RUN')
                        break
                    try:
                        put(task)
                    except Exception as e:
                        job, idx = task[:2]
                        try:
                            cache[job]._set(idx, (False, e))
                        except KeyError:
                            pass
                else:
                    if set_length:
                        util.debug('doing set_length()')
                        idx = task[1] if task else -1
                        set_length(idx + 1)
                    continue
                break
            finally:
                task = taskseq = job = None
        else:
            util.debug('task handler got sentinel')

        try:
            # tell result handler to finish when cache is empty
            util.debug('task handler sending sentinel to result handler')
            outqueue.put(None)

            # tell workers there is no more work
            util.debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except OSError:
            util.debug('task handler got OSError when sending sentinels')

        util.debug('task handler exiting')

    @staticmethod
    def _handle_results(outqueue, get, cache):
        thread = threading.current_thread()

        while 1:
            try:
                task = get()
            except (OSError, EOFError):
                util.debug('result handler got EOFError/OSError -- exiting')
                return

            if thread._state:
                assert thread._state == TERMINATE, "Thread not in TERMINATE"
                util.debug('result handler found thread._state=TERMINATE')
                break

            if task is None:
                util.debug('result handler got sentinel')
                break

            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass
            task = job = obj = None

        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (OSError, EOFError):
                util.debug('result handler got EOFError/OSError -- exiting')
                return

            if task is None:
                util.debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass
            task = job = obj = None

        if hasattr(outqueue, '_reader'):
            util.debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block.  There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (OSError, EOFError):
                pass

        util.debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)

    @staticmethod
    def _get_tasks(func, it, size):
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)

    def __reduce__(self):
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )

    def close(self):
        util.debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE

    def terminate(self):
        util.debug('terminating pool')
        self._state = TERMINATE
        self._worker_handler._state = TERMINATE
        self._terminate()

    def join(self):
        util.debug('joining pool')
        if self._state == RUN:
            raise ValueError("Pool is still running")
        elif self._state not in (CLOSE, TERMINATE):
            raise ValueError("In unknown state")
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        util.debug('removing tasks from inqueue until task handler finished')
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)

    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        worker_handler, task_handler, result_handler, cache):
        # this is guaranteed to only be called once
        util.debug('finalizing pool')

        worker_handler._state = TERMINATE
        task_handler._state = TERMINATE

        util.debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))

        if (not result_handler.is_alive()) and (len(cache) != 0):
            raise AssertionError(
                "Cannot have cache with result_hander not alive")

        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel

        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        util.debug('joining worker handler')
        if threading.current_thread() is not worker_handler:
            worker_handler.join()

        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            util.debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        util.debug('joining task handler')
        if threading.current_thread() is not task_handler:
            task_handler.join()

        util.debug('joining result handler')
        if threading.current_thread() is not result_handler:
            result_handler.join()

        if pool and hasattr(pool[0], 'terminate'):
            util.debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    util.debug('cleaning up worker %d' % p.pid)
                    p.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()
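
# A minimal usage sketch for the Pool class above. The two helpers are
# hypothetical and never called at import time; the worker function must
# live at module level so it can be pickled and sent to worker processes.
def _demo_square(x):
    return x * x

def _demo_pool_usage():
    # Blocking map(), non-blocking apply_async(), and the context-manager
    # form, whose __exit__ calls terminate().
    with Pool(processes=2) as p:
        squares = p.map(_demo_square, range(10))        # [0, 1, 4, ..., 81]
        async_res = p.apply_async(_demo_square, (7,))   # an ApplyResult
        forty_nine = async_res.get(timeout=10)
    return squares, forty_nine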

#
# Class whose instances are returned by `Pool.apply_async()`
#

class ApplyResult(object):

    def __init__(self, cache, callback, error_callback):
        self._event = threading.Event()
        self._job = next(job_counter)
        self._cache = cache
        self._callback = callback
        self._error_callback = error_callback
        cache[self._job] = self

    def ready(self):
        return self._event.is_set()

    def successful(self):
        if not self.ready():
            raise ValueError("{0!r} not ready".format(self))
        return self._success

    def wait(self, timeout=None):
        self._event.wait(timeout)

    def get(self, timeout=None):
        self.wait(timeout)
        if not self.ready():
            raise TimeoutError
        if self._success:
            return self._value
        else:
            raise self._value

    def _set(self, i, obj):
        self._success, self._value = obj
        if self._callback and self._success:
            self._callback(self._value)
        if self._error_callback and not self._success:
            self._error_callback(self._value)
        self._event.set()
        del self._cache[self._job]

AsyncResult = ApplyResult       # create alias -- see #17805
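
# A minimal sketch of the ApplyResult (AsyncResult) protocol, reusing the
# hypothetical _demo_square helper defined after the Pool class: ready()
# reports completion, successful() may only be called once ready, and get()
# either returns the value or re-raises the worker-side exception.
def _demo_async_result():
    with Pool(processes=1) as p:
        res = p.apply_async(_demo_square, (3,))
        res.wait(timeout=10)                # blocks until _set() fires
        if res.ready() and res.successful():
            return res.get()                # 9
    return None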

#
# Class whose instances are returned by `Pool.map_async()`
#

class MapResult(ApplyResult):

    def __init__(self, cache, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, cache, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            self._number_left = 0
            self._event.set()
            del cache[self._job]
        else:
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        self._number_left -= 1
        success, result = success_result
        if success and self._success:
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                del self._cache[self._job]
                self._event.set()
        else:
            if not success and self._success:
                # only store first exception
                self._success = False
                self._value = result
            if self._number_left == 0:
                # only consider the result ready once all jobs are done
                if self._error_callback:
                    self._error_callback(self._value)
                del self._cache[self._job]
                self._event.set()

#
# Class whose instances are returned by `Pool.imap()`
#

class IMapIterator(object):

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._items = collections.deque()
        self._index = 0
        self._length = None
        self._unsorted = {}
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        with self._cond:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration from None
                self._cond.wait(timeout)
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration from None
                    raise TimeoutError from None

        success, value = item
        if success:
            return value
        raise value

    __next__ = next                    # XXX

    def _set(self, i, obj):
        with self._cond:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj

            if self._index == self._length:
                del self._cache[self._job]

    def _set_length(self, length):
        with self._cond:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]

#
# Class whose instances are returned by `Pool.imap_unordered()`
#

class IMapUnorderedIterator(IMapIterator):

    def _set(self, i, obj):
        with self._cond:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
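
# A minimal sketch contrasting the two iterators, reusing the hypothetical
# _demo_square helper defined after the Pool class: imap() yields results in
# input order via IMapIterator, while imap_unordered() yields them as soon
# as workers finish via IMapUnorderedIterator.
def _demo_imap_ordering():
    with Pool(processes=2) as p:
        ordered = list(p.imap(_demo_square, range(5)))    # [0, 1, 4, 9, 16]
        unordered = sorted(p.imap_unordered(_demo_square, range(5)))
    return ordered, unordered   # equal once the unordered results are sorted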

#
#
#

class ThreadPool(Pool):
    _wrap_exception = False

    @staticmethod
    def Process(*args, **kwds):
        from .dummy import Process
        return Process(*args, **kwds)

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        self._inqueue = queue.SimpleQueue()
        self._outqueue = queue.SimpleQueue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # drain inqueue, and put sentinels at its head to make workers finish
        try:
            while True:
                inqueue.get(block=False)
        except queue.Empty:
            pass
        for i in range(size):
            inqueue.put(None)
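
# A minimal ThreadPool usage sketch (hypothetical helper, never called at
# import time): ThreadPool reuses the Pool machinery but runs tasks in
# threads of the current process, so callables need not be picklable.
def _demo_thread_pool():
    with ThreadPool(processes=4) as tp:
        # A lambda is fine here because nothing crosses a process boundary.
        return tp.map(lambda x: x + 1, range(5))    # [1, 2, 3, 4, 5]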