# ---- newio/__init__.py ----
from .api import *  # noqa: F401,F403
from . import api

__all__ = api.__all__


# ---- newio/_socket.py ----
from contextlib import contextmanager
from socket import SOL_SOCKET, SO_ERROR

from .api import wait_read, wait_write, FileDescriptor

try:
    from ssl import SSLWantReadError, SSLWantWriteError
    WantRead = (BlockingIOError, InterruptedError, SSLWantReadError)
    WantWrite = (BlockingIOError, InterruptedError, SSLWantWriteError)
except ImportError:
    WantRead = (BlockingIOError, InterruptedError)
    WantWrite = (BlockingIOError, InterruptedError)


class Socket:
    '''
    Non-blocking wrapper around a socket object.

    The original socket is put into non-blocking mode when it's wrapped.
    '''

    def __init__(self, sock):
        self._socket = sock
        self._socket.setblocking(False)
        self._fd = FileDescriptor(sock.fileno())
        # Commonly used bound methods
        self._socket_send = sock.send
        self._socket_recv = sock.recv

    def __repr__(self):
        return f'<{type(self).__name__} {self._socket!r}>'

    def __getattr__(self, name):
        return getattr(self._socket, name)

    def fileno(self):
        return self._fd.fileno()

    def settimeout(self, seconds):
        raise RuntimeError('Use newio.timeout() to set a timeout')

    def gettimeout(self):
        return None

    def dup(self):
        return type(self)(self._socket.dup())

    @contextmanager
    def blocking(self):
        '''Allow temporary access to the underlying socket in blocking mode'''
        try:
            self._socket.setblocking(True)
            yield self._socket
        finally:
            self._socket.setblocking(False)

    async def recv(self, maxsize, flags=0):
        while True:
            try:
                return self._socket_recv(maxsize, flags)
            except WantRead:
                await wait_read(self._fd)
            except WantWrite:
                await wait_write(self._fd)

    async def recv_into(self, buffer, nbytes=0, flags=0):
        while True:
            try:
                return self._socket.recv_into(buffer, nbytes, flags)
            except WantRead:
                await wait_read(self._fd)
            except WantWrite:
                await wait_write(self._fd)

    async def send(self, data, flags=0):
        while True:
            try:
                return self._socket_send(data, flags)
            except WantWrite:
                await wait_write(self._fd)
            except WantRead:
                await wait_read(self._fd)

    async def sendall(self, data, flags=0):
        buffer = memoryview(data).cast('b')
        while buffer:
            try:
                nsent = self._socket_send(buffer, flags)
                buffer = buffer[nsent:]
            except WantWrite:
                await wait_write(self._fd)
            except WantRead:
                await wait_read(self._fd)

    async def accept(self):
        while True:
            try:
                client, addr = self._socket.accept()
                return type(self)(client), addr
            except WantRead:
                await wait_read(self._fd)

    async def connect_ex(self, address):
        try:
            await self.connect(address)
            return 0
        except OSError as e:
            return e.errno

    async def connect(self, address):
        try:
            result = self._socket.connect(address)
            if getattr(self, 'do_handshake_on_connect', False):
                await self.do_handshake()
            return result
        except WantWrite:
            await wait_write(self._fd)
        err = self._socket.getsockopt(SOL_SOCKET, SO_ERROR)
        if err != 0:
            raise OSError(err, 'Connect call failed %s' % (address,))
        if getattr(self, 'do_handshake_on_connect', False):
            await self.do_handshake()

    async def recvfrom(self, buffersize, flags=0):
        while True:
            try:
                return self._socket.recvfrom(buffersize, flags)
            except WantRead:
                await wait_read(self._fd)
            except WantWrite:
                await wait_write(self._fd)

    async def recvfrom_into(self, buffer, bytes=0, flags=0):
        while True:
            try:
                return self._socket.recvfrom_into(buffer, bytes, flags)
            except WantRead:
                await wait_read(self._fd)
            except WantWrite:
                await wait_write(self._fd)

    async def sendto(self, bytes, flags_or_address, address=None):
        if address:
            flags = flags_or_address
        else:
            address = flags_or_address
            flags = 0
        while True:
            try:
                return self._socket.sendto(bytes, flags, address)
            except WantWrite:
                await wait_write(self._fd)
            except WantRead:
                await wait_read(self._fd)

    async def recvmsg(self, bufsize, ancbufsize=0, flags=0):
        while True:
            try:
                return self._socket.recvmsg(bufsize, ancbufsize, flags)
            except WantRead:
                await wait_read(self._fd)

    async def recvmsg_into(self, buffers, ancbufsize=0, flags=0):
        while True:
            try:
                return self._socket.recvmsg_into(buffers, ancbufsize, flags)
            except WantRead:
                await wait_read(self._fd)

    async def sendmsg(self, buffers, ancdata=(), flags=0, address=None):
        while True:
            try:
                return self._socket.sendmsg(buffers, ancdata, flags, address)
            except WantWrite:
                await wait_write(self._fd)

    # Special functions for SSL
    async def do_handshake(self):
        while True:
            try:
                return self._socket.do_handshake()
            except WantRead:
                await wait_read(self._fd)
            except WantWrite:
                await wait_write(self._fd)

    # Design discussion. Why make close() async? Partly it's to make the
    # programming interface highly uniform with the other methods (all of
    # which involve an await). It's also to provide consistency with the
    # Stream API below which requires an asynchronous close to properly
    # flush I/O buffers.
    async def close(self):
        if self._socket:
            self._socket.close()
            self._socket = None

    # This is declared as async for the same reason as close()
    async def shutdown(self, how):
        if self._socket:
            self._socket.shutdown(how)

    async def __aenter__(self):
        self._socket.__enter__()
        return self

    async def __aexit__(self, *args):
        if self._socket:
            self._socket.__exit__(*args)
            self._socket = None
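
# --- Usage sketch (illustrative, not part of the package) -------------------
# A minimal sketch of how the non-blocking Socket wrapper above is driven
# from a newio task. The _example_echo_handler() name and the already
# accepted stdlib socket passed to it are assumptions for the example;
# only the recv/sendall/async-with behaviour comes from the wrapper itself.

async def _example_echo_handler(raw_sock):
    async with Socket(raw_sock) as sock:
        while True:
            data = await sock.recv(4096)
            if not data:          # peer closed the connection
                break
            await sock.sendall(data)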

# ---- newio/api.py ----
'''Newio common API for users'''
from . import syscall
from .syscall import TaskCanceled, TaskTimeout, FileDescriptor, Task

__all__ = (
    'TaskCanceled', 'TaskTimeout', 'FileDescriptor', 'Task',
    'wait_read', 'wait_write', 'sleep', 'spawn', 'current_task',
    'timeout', 'open_nursery', 'run_in_thread', 'run_in_process',
)


async def wait_read(fd: FileDescriptor) -> None:
    '''Wait until fd is readable'''
    await syscall.nio_wait_read(fd)


async def wait_write(fd: FileDescriptor) -> None:
    '''Wait until fd is writeable'''
    await syscall.nio_wait_write(fd)


async def run_in_thread(fn, *args, **kwargs):
    '''Run fn in the thread pool and return its result'''
    return await syscall.nio_run_in_thread(fn, *args, **kwargs)


async def run_in_process(fn, *args, **kwargs):
    '''Run fn in the process pool and return its result'''
    return await syscall.nio_run_in_process(fn, *args, **kwargs)


async def sleep(seconds: float) -> None:
    '''Sleep for at least the given number of seconds'''
    await syscall.nio_sleep(seconds)


async def spawn(coro) -> Task:
    '''Spawn a task'''
    return await syscall.nio_spawn(coro)


async def current_task() -> Task:
    '''Get the current task'''
    return await syscall.nio_current_task()


class timeout:
    '''Async context manager for task timeout'''

    def __init__(self, seconds: float):
        self._seconds = seconds
        self._timeout = None

    async def __aenter__(self):
        self._timeout = await syscall.nio_set_timeout(self._seconds)

    async def __aexit__(self, *exc_info):
        await syscall.nio_unset_timeout(self._timeout)


class open_nursery:
    '''A nursery manages the tasks it spawns.

    All tasks spawned by the nursery are guaranteed to be stopped after the
    nursery exits. You must explicitly join spawned tasks, otherwise they
    will be canceled when the nursery exits.
    '''

    def __init__(self):
        self._tasks = []
        self._is_closed = False

    async def spawn(self, coro):
        '''Spawn a task in the nursery'''
        if self._is_closed:
            raise RuntimeError('nursery already closed')
        task = await spawn(coro)
        self._tasks.append(task)
        return task

    async def __aenter__(self):
        if self._is_closed:
            raise RuntimeError('nursery already closed')
        return self

    async def __aexit__(self, *exc_info):
        self._is_closed = True
        for task in self._tasks:
            if task.is_alive:
                await task.cancel()
        for task in self._tasks:
            if task.is_alive:
                await task.join()
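
# --- Usage sketch (illustrative, not part of the package) -------------------
# How timeout() and open_nursery() compose. The worker() coroutine and the
# 5-second budget are assumptions for the example; spawn/join/cancel follow
# the semantics documented above: unjoined tasks are canceled when the
# nursery exits.

async def _example_supervised_workers():
    async def worker(n):
        await sleep(n)
        return n

    try:
        async with timeout(5):
            async with open_nursery() as nursery:
                first = await nursery.spawn(worker(1))
                await nursery.spawn(worker(60))  # never joined: canceled on exit
                await first.join()
    except TaskTimeout:
        pass  # the whole block exceeded its 5 second budget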

# ---- newio/queue.py ----
from collections import deque
from heapq import heappush, heappop

from .sync import Condition

__all__ = ('Queue', 'PriorityQueue', 'LifoQueue')


class Queue:
    '''A queue for communicating between tasks.'''

    def __init__(self, maxsize=0):
        self.maxsize = maxsize
        self._get_waiting = Condition()
        self._put_waiting = Condition()
        self._join_waiting = Condition()
        self._task_count = 0
        self._queue = self._init_internal_queue()

    def __repr__(self):
        name = type(self).__name__
        return f'<{name}, size={self.qsize()}>'

    def _init_internal_queue(self):
        return deque()

    def qsize(self):
        return len(self._queue)

    def empty(self):
        return not self._queue

    def full(self):
        # A maxsize of None or 0 means the queue is unbounded
        if not self.maxsize:
            return False
        return self.qsize() >= self.maxsize

    async def get(self):
        while self.empty():
            await self._get_waiting.wait()
        result = self._get()
        await self._put_waiting.notify()
        return result

    def _get(self):
        return self._queue.popleft()

    async def join(self):
        if self._task_count > 0:
            await self._join_waiting.wait()

    async def put(self, item):
        while self.full():
            await self._put_waiting.wait()
        self._put(item)
        self._task_count += 1
        await self._get_waiting.notify()

    def _put(self, item):
        self._queue.append(item)

    async def task_done(self):
        self._task_count -= 1
        if self._task_count <= 0:
            await self._join_waiting.notify_all()


class PriorityQueue(Queue):
    '''A queue that outputs the item with the lowest priority first.

    Items have to be orderable objects.
    '''

    def _init_internal_queue(self):
        return []

    def _put(self, item):
        heappush(self._queue, item)

    def _get(self):
        return heappop(self._queue)


class LifoQueue(Queue):
    '''Last in, first out queue: retrieves the most recently added items first.'''

    def _init_internal_queue(self):
        return []

    def _put(self, item):
        self._queue.append(item)

    def _get(self):
        return self._queue.pop()
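
# --- Usage sketch (illustrative, not part of the package) -------------------
# A producer/consumer pattern over Queue. Pre-filling the unbounded queue and
# spawning a single consumer via a nursery from newio.api are assumptions for
# the example; put/get/task_done/join behave as implemented above.

async def _example_producer_consumer():
    from .api import open_nursery

    queue = Queue()

    async def consumer():
        while True:
            item = await queue.get()
            # ... process item here ...
            await queue.task_done()

    for i in range(100):
        await queue.put(i)          # unbounded queue: never blocks here

    async with open_nursery() as nursery:
        await nursery.spawn(consumer())
        await queue.join()          # resumes once every item is marked done
        # the never-joined consumer is canceled when the nursery exits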

# ---- newio/socket.py ----
'''An async version of the standard socket library.

The entire contents of the stdlib socket module are made available here.
The socket class is replaced by an async compatible version.
'''
import socket as std

__all__ = std.__all__

from socket import *  # noqa: F401,F403
from functools import wraps

from ._socket import Socket
from .api import run_in_thread


@wraps(std.socket)
def socket(*args, **kwargs):
    return Socket(std.socket(*args, **kwargs))


@wraps(std.socketpair)
def socketpair(*args, **kwargs):
    s1, s2 = std.socketpair(*args, **kwargs)
    return Socket(s1), Socket(s2)


@wraps(std.fromfd)
def fromfd(*args, **kwargs):
    return Socket(std.fromfd(*args, **kwargs))


@wraps(std.create_connection)
async def create_connection(*args, **kwargs):
    sock = await run_in_thread(std.create_connection, *args, **kwargs)
    return Socket(sock)


@wraps(std.getaddrinfo)
async def getaddrinfo(*args, **kwargs):
    return await run_in_thread(std.getaddrinfo, *args, **kwargs)


@wraps(std.getfqdn)
async def getfqdn(*args, **kwargs):
    return await run_in_thread(std.getfqdn, *args, **kwargs)


@wraps(std.gethostbyname)
async def gethostbyname(*args, **kwargs):
    return await run_in_thread(std.gethostbyname, *args, **kwargs)


@wraps(std.gethostbyname_ex)
async def gethostbyname_ex(*args, **kwargs):
    return await run_in_thread(std.gethostbyname_ex, *args, **kwargs)


@wraps(std.gethostname)
async def gethostname(*args, **kwargs):
    return await run_in_thread(std.gethostname, *args, **kwargs)


@wraps(std.gethostbyaddr)
async def gethostbyaddr(*args, **kwargs):
    return await run_in_thread(std.gethostbyaddr, *args, **kwargs)


@wraps(std.getnameinfo)
async def getnameinfo(*args, **kwargs):
    return await run_in_thread(std.getnameinfo, *args, **kwargs)


# ---- newio/ssl.py ----
'''Wrapper around built-in SSL module'''
__all__ = ()

from functools import wraps

try:
    import ssl as _ssl
    from ssl import *  # noqa: F401,F403
except ImportError:
    _ssl = None

    # We need these exceptions defined, even if ssl is not available.
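# (An illustrative usage sketch for this module follows the ssl.py section
# below, after the exception fallbacks and wrappers are defined.)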
    class SSLWantReadError(Exception):
        pass

    class SSLWantWriteError(Exception):
        pass

from .api import run_in_thread
from ._socket import Socket

if _ssl:
    @wraps(_ssl.wrap_socket)
    async def wrap_socket(sock, *args, do_handshake_on_connect=True, **kwargs):
        if isinstance(sock, Socket):
            sock = sock._socket
        ssl_sock = _ssl.wrap_socket(
            sock, *args, do_handshake_on_connect=False, **kwargs)
        cssl_sock = Socket(ssl_sock)
        cssl_sock.do_handshake_on_connect = do_handshake_on_connect
        if do_handshake_on_connect and ssl_sock._connected:
            await cssl_sock.do_handshake()
        return cssl_sock

    @wraps(_ssl.get_server_certificate)
    async def get_server_certificate(*args, **kwargs):
        return await run_in_thread(_ssl.get_server_certificate, *args, **kwargs)

    # Small wrapper class to make sure the wrap_socket() method returns
    # the right type
    class NewioSSLContext(object):

        def __init__(self, context):
            self._context = context

        def __getattr__(self, name):
            return getattr(self._context, name)

        async def wrap_socket(self, sock, *args,
                              do_handshake_on_connect=True, **kwargs):
            sock = self._context.wrap_socket(
                sock._socket, *args, do_handshake_on_connect=False, **kwargs)
            csock = Socket(sock)
            csock.do_handshake_on_connect = do_handshake_on_connect
            if do_handshake_on_connect and sock._connected:
                await csock.do_handshake()
            return csock

        def __setattr__(self, name, value):
            if name == '_context':
                super().__setattr__(name, value)
            else:
                setattr(self._context, name, value)

    # Name alias
    def SSLContext(protocol):
        return NewioSSLContext(_ssl.SSLContext(protocol))

    @wraps(_ssl.create_default_context)
    def create_default_context(*args, **kwargs):
        context = _ssl.create_default_context(*args, **kwargs)
        return NewioSSLContext(context)
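
# --- Usage sketch (illustrative, not part of the package) -------------------
# Wrapping an async socket with the NewioSSLContext defined above. The host
# name, port and request bytes are assumptions for the example; wrap_socket()
# defers the TLS handshake until the socket is connected, then performs it
# with the non-blocking do_handshake() loop from newio._socket.Socket.

async def _example_tls_client(host='example.com'):
    from . import socket as newio_socket

    context = create_default_context()
    sock = await newio_socket.create_connection((host, 443))
    tls_sock = await context.wrap_socket(sock, server_hostname=host)
    try:
        await tls_sock.sendall(
            b'HEAD / HTTP/1.0\r\nHost: ' + host.encode() + b'\r\n\r\n')
        return await tls_sock.recv(4096)
    finally:
        await tls_sock.close()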

# ---- newio/sync.py ----
'''Synchronization primitives'''
from .syscall import Futex


class BrokenBarrierError(RuntimeError):
    '''Exception raised if the barrier is broken'''


class Lock:

    def __init__(self):
        self._is_locked = False
        self._futex = Futex()

    async def __aenter__(self):
        await self.acquire()

    async def __aexit__(self, *exc_info):
        await self.release()

    async def acquire(self):
        while self._is_locked:
            await self._futex.wait()
        self._is_locked = True

    async def release(self):
        if not self._is_locked:
            raise RuntimeError('release unlocked lock')
        self._is_locked = False
        await self._futex.wake(1)

    def locked(self):
        return self._is_locked


class Condition:

    def __init__(self):
        self._futex = Futex()

    async def wait(self):
        await self._futex.wait()

    async def notify(self, n=1):
        await self._futex.wake(n)

    async def notify_all(self):
        await self.notify(Futex.WAKE_ALL)


class Semaphore:

    def __init__(self, value=1):
        if value < 0:
            raise ValueError('semaphore initial value must be >= 0')
        self._value = value
        self._futex = Futex()

    async def __aenter__(self):
        await self.acquire()

    async def __aexit__(self, *exc_info):
        await self.release()

    async def acquire(self):
        while self._value <= 0:
            await self._futex.wait()
        self._value -= 1

    async def release(self):
        self._value += 1
        if self._value == 1:
            await self._futex.wake(1)


class BoundedSemaphore:

    def __init__(self, value=1):
        if value < 0:
            raise ValueError('semaphore initial value must be >= 0')
        self._value = value
        self._init_value = value
        self._futex = Futex()

    async def __aenter__(self):
        await self.acquire()

    async def __aexit__(self, *exc_info):
        await self.release()

    async def acquire(self):
        while self._value <= 0:
            await self._futex.wait()
        self._value -= 1

    async def release(self):
        if self._value >= self._init_value:
            raise RuntimeError('semaphore released too many times')
        self._value += 1
        if self._value == 1:
            await self._futex.wake(1)


class Event:

    def __init__(self):
        self._futex = Futex()
        self._is_set = False

    def is_set(self):
        return self._is_set

    async def set(self):
        if self._is_set:
            return
        self._is_set = True
        await self._futex.wake(Futex.WAKE_ALL)

    def clear(self):
        self._is_set = False

    async def wait(self):
        if self._is_set:
            return
        await self._futex.wait()


class Barrier:

    def __init__(self, parties, action=None):
        self._futex = Futex()
        self._parties = parties
        self._action = action
        self._count = 0
        self._is_broken = False
        self._is_filled = False

    async def wait(self):
        if self._is_broken:
            raise BrokenBarrierError()
        if self._is_filled:
            return 0
        index = self._count
        self._count += 1
        try:
            if self._count >= self._parties:
                self._is_filled = True
                await self._futex.wake(Futex.WAKE_ALL)
                if self._action is not None:
                    self._action()
            else:
                await self._futex.wait()
        except BaseException:
            self._is_broken = True
            raise
        finally:
            self._count -= 1
        if self._is_broken:
            raise BrokenBarrierError()
        return index

    # TODO: implement barrier reset
    # async def reset(self):
    #     pass

    async def abort(self):
        self._is_broken = True
        if self._count > 0:
            await self._futex.wake(Futex.WAKE_ALL)
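
# --- Usage sketch (illustrative, not part of the package) -------------------
# Coordinating two tasks with Event. The waiter/setter split and the use of
# a nursery from newio.api are assumptions for the example; set() wakes every
# task blocked in wait(), as implemented above.

async def _example_event():
    from .api import open_nursery, sleep

    ready = Event()

    async def waiter():
        await ready.wait()      # parked until the event is set
        return 'go'

    async with open_nursery() as nursery:
        task = await nursery.spawn(waiter())
        await sleep(0.1)
        await ready.set()
        await task.join()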

# ---- newio/syscall.py ----
'''All syscalls are defined here for communicating with the kernel.

Implementation note: the **_nio_ref_** attribute binds a user space object
to its kernel space counterpart, so the two can be exchanged efficiently
between user space and kernel space.
'''
import time
from types import coroutine


class TaskCanceled(BaseException):
    '''Exception raised when a task is canceled.

    It is used to unwind the coroutine stack and clean up resources; the
    exception **MUST NOT** be caught without re-raising it. It directly
    inherits from BaseException instead of Exception so that it is not
    accidentally caught by code that catches Exception.
    '''


class TaskTimeout(Exception):
    '''Exception raised when a task times out.'''


class FileDescriptor:
    '''A wrapper for a file descriptor'''

    def __init__(self, fileno: int):
        self._fileno = fileno
        self._nio_ref_ = None

    def fileno(self) -> int:
        '''Get the underlying file descriptor'''
        return self._fileno

    def __repr__(self):
        return f'<FileDescriptor {self._fileno}>'


class Timeout:
    '''A wrapper for the kernel timeout object'''

    def __init__(self, kernel_timeout):
        self._nio_ref_ = kernel_timeout
        kernel_timeout._nio_ref_ = self

    @property
    def seconds(self) -> float:
        '''timeout seconds'''
        return self._nio_ref_.seconds

    @property
    def deadline(self) -> float:
        '''timeout deadline, based on time.monotonic()'''
        return self._nio_ref_.deadline

    @property
    def is_expired(self) -> bool:
        '''is timeout expired'''
        return self._nio_ref_.is_expired

    def __repr__(self):
        if self.is_expired:
            status = 'expired'
        else:
            remain = max(0, self.deadline - time.monotonic())
            status = f'remain={remain:.3f}s'
        return f'<Timeout {status}>'


class Futex:
    '''Futex - fast userspace mutex, borrowed from the Linux kernel.

    It is a synchronization primitive used to coordinate tasks.
    '''

    # A symbol for waking all tasks
    WAKE_ALL = -1

    def __init__(self):
        self._nio_ref_ = None

    def __repr__(self):
        return f'<Futex {id(self):#x}>'

    async def wait(self):
        '''Wait on the futex'''
        await nio_futex_wait(self)

    async def wake(self, n: int):
        '''Wake up at most n tasks waiting on the futex'''
        await nio_futex_wake(self, n)


class Task:
    '''A wrapper for a kernel task'''

    def __init__(self, kernel_task):
        self._nio_ref_ = kernel_task
        kernel_task._nio_ref_ = self

    @property
    def ident(self) -> int:
        '''task id'''
        return self._nio_ref_.ident

    @property
    def name(self) -> str:
        '''task name'''
        return self._nio_ref_.name

    @property
    def is_alive(self) -> bool:
        '''is task alive'''
        return self._nio_ref_.is_alive

    @property
    def result(self):
        '''Get the task result after the task has stopped'''
        if self.is_alive:
            raise RuntimeError('task is alive, can not get result')
        return self._nio_ref_.result

    @property
    def error(self):
        '''Get the task error after the task has stopped'''
        if self.is_alive:
            raise RuntimeError('task is alive, can not get error')
        return self._nio_ref_.error

    async def join(self) -> None:
        '''Join the task'''
        await nio_join(self)

    async def cancel(self) -> None:
        '''Cancel the task'''
        await nio_cancel(self)

    def __repr__(self):
        if self.is_alive:
            state = 'alive'
        elif self.error:
            state = 'error'
        else:
            state = 'stopped'
        return f'<Task#{self.ident} {self.name} @{state}>'

@coroutine
def nio_wait_read(fd: FileDescriptor) -> None:
    '''Wait until fd is readable

    Raises:
        TaskTimeout: task timeout
        TaskCanceled: task canceled
    '''
    yield (nio_wait_read, fd)


@coroutine
def nio_wait_write(fd: FileDescriptor) -> None:
    '''Wait until fd is writeable

    Raises:
        TaskTimeout: task timeout
        TaskCanceled: task canceled
    '''
    yield (nio_wait_write, fd)


@coroutine
def nio_sleep(seconds: float = 0) -> None:
    '''Sleep for at least the given number of seconds

    Raises:
        TaskTimeout: task timeout
        TaskCanceled: task canceled
    '''
    if seconds < 0:
        raise ValueError('can not sleep a negative number of seconds')
    yield (nio_sleep, seconds)


@coroutine
def nio_spawn(coro) -> Task:
    '''Spawn a task

    Raises:
        TaskCanceled: task canceled
    '''
    return (yield (nio_spawn, coro))


@coroutine
def nio_current_task() -> Task:
    '''Get the current task

    Raises:
        TaskCanceled: task canceled
    '''
    return (yield (nio_current_task,))


@coroutine
def nio_cancel(task: Task) -> None:
    '''Cancel a task

    This call returns immediately, before the task has stopped. It does not
    raise TaskCanceled, so it's safe to cancel multiple tasks one by one.
    '''
    yield (nio_cancel, task)


@coroutine
def nio_join(task: Task) -> None:
    '''Join a task, waiting until the task has stopped

    Raises:
        TaskTimeout: task timeout
        TaskCanceled: task canceled
    '''
    yield (nio_join, task)


@coroutine
def nio_set_timeout(seconds) -> Timeout:
    '''Set a timeout for the current task

    Raises:
        TaskCanceled: task canceled
    '''
    if seconds < 0:
        raise ValueError('can not set a negative timeout')
    return (yield (nio_set_timeout, seconds))


@coroutine
def nio_unset_timeout(timeout: Timeout) -> None:
    '''Unset a timeout for the current task

    Raises:
        TaskCanceled: task canceled
    '''
    yield (nio_unset_timeout, timeout)


@coroutine
def nio_futex_wait(futex: Futex) -> None:
    '''Wait on the futex

    Raises:
        TaskTimeout: task timeout
        TaskCanceled: task canceled
    '''
    yield (nio_futex_wait, futex)


@coroutine
def nio_futex_wake(futex: Futex, n: int) -> None:
    '''Wake up at most n tasks waiting on the futex

    Raises:
        TaskCanceled: task canceled
    '''
    yield (nio_futex_wake, futex, n)


@coroutine
def nio_run_in_thread(fn, *args, **kwargs):
    '''Run fn in the thread pool and return its result

    Raises:
        TaskTimeout: task timeout
        TaskCanceled: task canceled
    '''
    return (yield (nio_run_in_thread, fn, args, kwargs))


@coroutine
def nio_run_in_process(fn, *args, **kwargs):
    '''Run fn in the process pool and return its result

    Raises:
        TaskTimeout: task timeout
        TaskCanceled: task canceled
    '''
    return (yield (nio_run_in_process, fn, args, kwargs))
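
# --- Usage sketch (illustrative, not part of the package) -------------------
# What a syscall looks like from the kernel's point of view: each nio_* call
# is a coroutine that yields a request tuple and is resumed once the request
# is satisfied. The hand-driven loop below only sketches that contract; the
# real newio kernel dispatches on the first tuple element and parks the task
# until the request completes.

def _example_drive_syscall():
    coro = nio_sleep(0.1)
    request = coro.send(None)      # -> (nio_sleep, 0.1)
    assert request[0] is nio_sleep
    try:
        coro.send(None)            # kernel resumes the task when done
    except StopIteration:
        pass                       # the syscall has completed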