PK!0QQquicken/__init__.pyfrom ._cli import cli_factory, QuickenError from .__version__ import __version__ PK! qquicken/__version__.py__version__ = '0.1.0' PK!<  quicken/_asyncio.py"""Asyncio utility classes. """ import asyncio import logging import multiprocessing import os import signal logger = logging.getLogger(__name__) class DeadlineTimer: """Timer that can handle waits > 1 day, since Python < 3.7.1 does not. """ MAX_DURATION = 86400 def __init__(self, callback, loop: asyncio.AbstractEventLoop): self._loop = loop self._callback = callback self._time_remaining = 0 self._handle: asyncio.TimerHandle = None def cancel(self): if self._handle: self._handle.cancel() self._handle = None def expires_from_now(self, seconds): if seconds < 0: raise ValueError('seconds must be positive') self.cancel() wait = seconds % self.MAX_DURATION self._time_remaining = max(seconds - wait, 0) self._handle = self._loop.call_later(wait, self._handle_expiration) def expires_at(self, seconds): self.expires_from_now(seconds - self._loop.time()) def _handle_expiration(self): self._handle = None if not self._time_remaining: self._callback() else: self.expires_from_now(self._time_remaining) class AsyncProcess: """asyncio wrapper for multiprocessing.Process. Not thread-safe. """ def __init__(self, target, args=(), kwargs=None, loop=None): self._target = target self._args = args if kwargs is None: kwargs = {} self._kwargs = kwargs if loop is None: loop = asyncio.get_running_loop() self._loop = loop self._process: multiprocessing.Process = None self._stop_cv = asyncio.Condition() self._stopped = False def start(self): assert self._process is None, 'Process was already started' self._process = multiprocessing.get_context('fork').Process( target=self._target, args=self._args, kwargs=self._kwargs) self._process.start() # Clean up to avoid lingering references. self._target = None self._args = None self._kwargs = None self._loop.add_reader(self._process.sentinel, self._handle_process_stop) def send_signal(self, sig): assert self._process is not None, 'Process must be started' os.kill(self._process.pid, sig) def terminate(self): self.send_signal(signal.SIGTERM) def kill(self): self.send_signal(signal.SIGKILL) @property def pid(self): return self._process.pid @property def exitcode(self): return self._process.exitcode async def wait(self): async with self._stop_cv: await self._stop_cv.wait_for(lambda: self._stopped) return self.exitcode def _handle_process_stop(self): self._loop.remove_reader(self._process.sentinel) self._process.join() async def notify(): async with self._stop_cv: self._stopped = True self._stop_cv.notify_all() self._loop.create_task(notify()) PK!P++quicken/_cli.py"""CLI wrapper interface for starting/using server process. """ from functools import wraps import json import logging import multiprocessing import os from pathlib import Path from typing import Callable, Dict, Optional, Union from fasteners import InterProcessLock from ._client import Client from ._typing import NoneFunction from ._constants import socket_name, server_state_name from ._protocol import ProcessState, Request, RequestTypes from ._signal import blocked_signals, forwarded_signals, SignalProxy from ._xdg import cache_dir, chdir, RuntimeDir logger = logging.getLogger(__name__) CliFactoryT = Callable[[], NoneFunction] BoolProvider = Callable[[], bool] class QuickenError(Exception): """Generic error during server start - message has details. """ pass def cli_factory( name: str, *, runtime_dir_path: Optional[str] = None, log_file: Optional[str] = None, server_idle_timeout: Optional[float] = None, bypass_server: BoolProvider = None, reload_server: BoolProvider = None, ): """Decorator to mark a function that provides the main script entry point. To benefit most from the daemon speedup, you must do required imports within the factory function itself and then have the returned function itself do a minimal amount of configuration - only those things dependent on e.g. environment/cwd. If any imported top-level modules make use of environment then they must be reconfigured on invocation of the cli, otherwise the environment of future clients will not be taken into account. Args: name: the name used for the socket file. runtime_dir_path: the directory used for the socket and pid file. If not provided then we fall back to: `$XDG_RUNTIME_DIR/quicken-{name}` or `$TMPDIR/quicken-{name}-{uid}` or `/tmp/quicken-{name}-{uid}`. If the directory exists it must be owned by the current user and have permissions 700. log_file: optional log file used by the server, must be an absolute path. If not provided the default is `$XDG_CACHE_HOME/quicken-{name}/server.log` or `$HOME/.cache/quicken-{name}/server.log`. server_idle_timeout: time in seconds after which the server will shut down if no requests are being processed. bypass_server: if True then run command directly instead of trying to use daemon. reload_server: if True then restart the server before executing the function. Throws: QuickenError: If any directory used by runtime_dir does not have the correct permissions. """ def inner_cli_factory(factory_fn: CliFactoryT) -> NoneFunction: @wraps(factory_fn) def run_cli() -> Optional[int]: """ Returns: Result from function or remote execution, suitable for passing to :func:`sys.exit`. """ nonlocal log_file if log_file is None: log_file = cache_dir(f'quicken-{name}') / 'server.log' log_file = Path(log_file).absolute() if bypass_server and bypass_server(): logger.debug('Bypassing server') return factory_fn()() runtime_dir = RuntimeDir(f'quicken-{name}', runtime_dir_path) client = None with CliServerManager( factory_fn, runtime_dir, log_file, server_idle_timeout) as manager: try: client = manager.connect() if reload_server and reload_server(): logger.debug('Reloading server') client = manager.restart() # TODO: Get server version. # TODO: Get server context and kill without pid. except ConnectionRefusedError: logger.warning( 'Failed to connect to server - executing cli directly.') if not client: multiprocessing.current_process().authkey = os.urandom(32) return factory_fn()() return _run_client(client) return run_cli return inner_cli_factory class CliServerManager: """Responsible for starting (if applicable) and connecting to the server. Race conditions are prevented by acquiring an exclusive lock on runtime_dir/admin during connection and start. """ def __init__( self, factory_fn, runtime_dir: RuntimeDir, log_file, server_idle_timeout): """ Args: factory_fn: function that provides the server request handler runtime_dir: runtime dir used for locks/socket log_file: server log file server_idle_timeout: idle timeout communicated to server if the process of connecting results in server start """ self._factory = factory_fn self._runtime_dir = runtime_dir self._log_file = log_file self._idle_timeout = server_idle_timeout self._lock = InterProcessLock('admin') # We initialize the Client and Listener classes without an authkey # parameter since there's no way to pre-share the secret securely # between processes not part of the same process tree. However, the # internal Client/Listener used as part of # multiprocessing.resource_sharer DOES initialize its own Client and # Listener with multiprocessing.current_process().authkey. We must have # some value so we use this dummy value. multiprocessing.current_process().authkey = b'0' * 32 def connect(self) -> Client: """Attempt to connect to the server, starting it if required. Args: timeout: seconds to wait for successful connection or startup Returns: Client connected to the server """ try: return self._get_client() except FileNotFoundError: # Server not up, no problem, we'll try to start it. logger.debug('Server not up, starting it.') except ConnectionRefusedError: socket_file = self._runtime_dir.path(socket_name) # Server may have died unexpectedly. logger.warning('Could not connect to server, starting it.') # Clean up the socket file before proceeding. socket_file.unlink() self._start_server() # Try to connect again, this time we don't catch anything, leave it to # the caller. return self._get_client() def restart(self) -> Client: """Restart the server and reconnect. """ self._stop_server() return self.connect() def _get_client(self) -> Client: """ Raises: FileNotFoundError if the socket file is not present ConnectionRefusedError if the socket file is present but the server is not accepting connections """ with chdir(self._runtime_dir): return Client(socket_name) def _get_server_state(self) -> Dict: """Retrieve server state data. """ with chdir(self._runtime_dir): return json.loads( Path(server_state_name).read_text(encoding='utf-8')) def _stop_server(self): from psutil import NoSuchProcess, Process server_state = self._get_server_state() pid = server_state['pid'] create_time = server_state['create_time'] try: process = Process(pid=pid) except NoSuchProcess: logger.debug( f'Daemon reload requested but process with pid {pid}' ' does not exist.') return if process.create_time() != create_time: logger.debug( 'Daemon reload requested but start time does not match' ' expected (probably new process re-using pid), skipping.') return try: # We don't want to leave it to the server to remove the socket since # we do not wait for it. with chdir(self._runtime_dir): os.unlink(socket_name) except FileNotFoundError: # No problem, if the file was removed at some point it doesn't # impact us. pass # This will cause the server to stop accepting clients and start # shutting down. It will wait for any still-running processes before # stopping completely, but it does not consume any other resources that # we are concerned with. process.terminate() def _start_server(self): """Start server as background process. The socket for the server has been created by the time this function returns. This function only returns in the parent, not the background process. """ cli = self._factory() # Lazy import so we only take the time to import if we have to start # the server. from ._server import run run( cli, log_file=self._log_file, runtime_dir=self._runtime_dir, server_idle_timeout=self._idle_timeout) def __enter__(self): """Enter the server admin lock context. """ with chdir(self._runtime_dir): # TODO: Ensure that this creates our lock file with 700 since # otherwise it might not be respected. self._lock.acquire() return self def __exit__(self, exc_type, exc_val, exc_tb): """Exit the server admin lock context. """ self._lock.release() def _run_client(client: Client) -> int: """Run command client against daemon listening at provided `socket_file`. Sends process context and waits for exit code. Process context includes: - environment - cwd - command line - file descriptors for stdin/out/err Args: sock: Socket connected to server. Must be a type appropriate for passing file descriptors. Returns: exit code of the process Raises: ConnectionRefusedError if server is not listening/available. """ logger.debug('Starting client communication') # Assume that we've already vetted the server and now we just need to run # the process. proxy = SignalProxy() # We must block signals before requesting remote process start otherwise # a user signal to the client may race with our ability to propagate it. with blocked_signals(forwarded_signals): state = ProcessState.for_current_process() logger.debug('Requesting process start') req = Request(RequestTypes.run_process, state) response = client.send(req) pid = response.contents logger.debug('Process running with pid: %d', pid) proxy.set_target(pid) logger.debug('Waiting for process to finish') response = client.send(Request(RequestTypes.wait_process_done, None)) return response.contents PK!=0quicken/_client.pyimport multiprocessing.connection from ._protocol import Request, Response class Client: """Enforces a request/response protocol on top of multiprocessing.connection.Client. """ def __init__(self, *args, **kwargs): self._client = multiprocessing.connection.Client(*args, **kwargs) def send(self, request: Request) -> Response: self._client.send(request) return self._client.recv() PK!kxw|88quicken/_constants.pysocket_name = 'socket' server_state_name = 'state.json' PK!9ɣYYquicken/_debug.pyimport functools import logging logger = logging.getLogger(__name__) def log_calls(obj): import inspect if inspect.isclass(obj): return _handle_class(obj) return _log_calls(obj) def _handle_class(cls): import inspect for name, fn in inspect.getmembers(cls): logged_member = _log_calls(fn, cls.__name__) if logged_member != fn: setattr(cls, name, logged_member) return cls def _log_calls(obj, namespace=None): import inspect try: name = obj.__name__ except AttributeError: name = None if namespace is not None: name = f'{namespace}.{name}' if inspect.isgeneratorfunction(obj): return _handle_generator_function(obj, name) if inspect.iscoroutinefunction(obj): return _handle_coroutine_function(obj, name) if inspect.isfunction(obj): return _handle_function(obj, name) return obj def _handle_function(f, name): @functools.wraps(f) def wrapper(*args, **kwargs): logger.debug(f'(call) {name}()') return f(*args, **kwargs) return wrapper def _handle_generator_function(f, name): @functools.wraps(f) def wrapper(*args, **kwargs): logger.debug(f'(next) {name}()') yield from f(*args, **kwargs) def provider(*args, **kwargs): logger.debug(f'(invoke) {name}()') return wrapper(*args, **kwargs) return provider def _handle_coroutine_function(f, name): @functools.wraps(f) async def wrapper(*args, **kwargs): logger.debug(f'(await) {name}()') return await f(*args, **kwargs) def provider(*args, **kwargs): logger.debug(f'(invoke) {name}()') return wrapper(*args, **kwargs) return provider def lineno(): import inspect frame = inspect.getouterframes(inspect.currentframe())[1] return frame.f_lineno PK!7+7+quicken/_multiprocessing.py"""Transferring files over connections. """ import asyncio from contextlib import contextmanager from io import TextIOWrapper import logging import multiprocessing from multiprocessing.connection import wait from multiprocessing.reduction import DupFd, register import os import socket import sys from typing import Any, TextIO logger = logging.getLogger(__name__) def run_in_process( target, name=None, args=(), kwargs=None, allow_detach=False, timeout=None): """Run provided target in a multiprocessing.Process. This function does not require that the `target` and arguments are picklable. Only the return value of `target` must be. Args: target: same as multiprocessing.Process name: same as multiprocessing.Process args: same as multiprocessing.Process kwargs: same as multiprocessing.Process allow_detach: passes a callback as the first argument to the function that, when invoked, detaches from the parent by forking. timeout: seconds after which processing will be aborted and the child process killed Returns: The return value of `target` Raises: *: Any exception raised by `target`. TimeoutError: If a timeout occurs. """ if not kwargs: kwargs = {} def launcher(): # multiprocessing doesn't offer a good way to detach from the parent # process, allowing the child to exist without being cleaned up at # parent close. So given # # 1. parent process (which invoked run_in_process) # 2. runner process (executing target function) # # we fork (2), creating (3) then continue executing in (3) and forcibly # exit (2). # # The downside of this approach is that any exceptions from the # process after detaching will not be propagated to the caller # (and Windows incompatibility). def detach(result=None): # Indicate no exception. child_pipe.send(False) child_pipe.send(result) pid = os.fork() if pid: # Ensure we don't return to caller within the subprocess. os._exit(0) new_args = list(args) if allow_detach: new_args.insert(0, detach) try: result = target(*new_args, **kwargs) except: child_pipe.send(True) from tblib import pickling_support pickling_support.install() child_pipe.send(sys.exc_info()) # Wait for signal from parent process to avoid exit/read race # condition. child_pipe.recv() # We don't really want the exception traced by multiprocessing # so exit like Python would. sys.exit(1) else: child_pipe.send(False) child_pipe.send(result) child_pipe.recv() ctx = multiprocessing.get_context('fork') child_pipe, parent_pipe = ctx.Pipe() p = ctx.Process(target=launcher, name=name) p.start() ready = wait([p.sentinel, parent_pipe], timeout=timeout) # Timeout if not ready: p.kill() raise TimeoutError('Timeout running function.') exc = None result = None if parent_pipe in ready: error = parent_pipe.recv() if error: from tblib import pickling_support pickling_support.install() _, exception, tb = parent_pipe.recv() exc = exception.with_traceback(tb) else: result = parent_pipe.recv() if p.sentinel in ready: # This can happen if the child process closes file descriptors, but we # do not handle it. assert p.exitcode is not None, 'Exit code must exist' if p.exitcode: if not exc: exc = RuntimeError( f'Process died with return code {p.exitcode}') else: # Indicate OK to continue. parent_pipe.send(True) p.join() if exc: raise exc return result class ConnectionClose(Exception): pass class AsyncConnectionAdapter: """Wraps multiprocessing.Connection. The underlying socket is still blocking, but this works better in our async server. Not thread-safe. """ def __init__(self, conn: multiprocessing.connection.Connection, loop=None): if not loop: loop = asyncio.get_running_loop() self._loop = loop self.connection = conn self._fd = self.connection.fileno() self._connected = True self._disconnect_cv = asyncio.Condition(loop=self._loop) self._attached = False self._read_queue = asyncio.Queue(loop=self._loop) self._write_queue = asyncio.Queue(loop=self._loop) self._attach_to_event_loop() async def send(self, obj: Any): """ Send provided object over Connection. Args: obj: picklable object """ await self._write_queue.put(obj) async def recv(self) -> Any: """ Returns: message received from peer Raises: ConnectionClose on connection close """ item = await self._read_queue.get() if isinstance(item, ConnectionClose): raise item return item async def closed(self): """Returns when the connection is closed. """ if self._connected: async with self._disconnect_cv: await self._disconnect_cv.wait() def _attach_to_event_loop(self): assert not self._attached, 'Must not be attached to attach' self._attached = True self._loop.add_reader(self._fd, self._handle_readable) self._loop.add_writer(self._fd, self._handle_writable) def _detach_from_event_loop(self): assert self._attached, 'Must be attached to detach' self._attached = False self._loop.remove_reader(self._fd) self._loop.remove_writer(self._fd) def _handle_readable(self): try: self._read_queue.put_nowait(self.connection.recv()) except EOFError: self._handle_disconnect() def _handle_disconnect(self): async def signal_disconnect(): async with self._disconnect_cv: self._disconnect_cv.notify_all() self._detach_from_event_loop() self._read_queue.put_nowait(ConnectionClose()) self._connected = False self._loop.create_task(signal_disconnect()) def _handle_writable(self): async def handle(): msg = await self._write_queue.get() self.connection.send(msg) if self._attached: self._loop.add_writer(self._fd, self._handle_writable) self._loop.remove_writer(self._fd) self._loop.create_task(handle()) @contextmanager def intercepted_sockets(): sockets = [] _socket_new = socket.socket.__new__ def socket_new(*args, **kwargs): sock = _socket_new(*args, **kwargs) sockets.append(sock) return sock socket.socket.__new__ = socket_new try: yield sockets finally: socket.socket.__new__ = _socket_new class ListenerStopped(Exception): pass class AsyncListener: """Async multiprocessing.connection.Listener. Not thread-safe. """ def __init__(self, address, loop=None): if not loop: loop = asyncio.get_running_loop() self._loop = loop self._backlog = asyncio.Queue(loop=self._loop) with intercepted_sockets() as sockets: self._listener = multiprocessing.connection.Listener(address) assert len(sockets) == 1, 'Only one socket should have been created' self._socket = sockets[0] logger.debug('Started listener (%d)', self._socket.fileno()) self._accepting = True self._attached = False self._shutdown = False self._attach_to_event_loop() async def accept(self) -> AsyncConnectionAdapter: """Accept the next incoming connection. Raises: ListenerStopped """ connection = await self._backlog.get() if isinstance(connection, ListenerStopped): raise connection return AsyncConnectionAdapter(connection, loop=self._loop) async def close(self): """Close the listener, waiting for the socket to be closed. This close does not prevent racing with an incoming client - consumers should use higher-level locking if required. """ logger.debug('AsyncListener.close()') try: if not self._accepting: self._shutdown = True return if self._shutdown: return self._shutdown = True # Detach so no more events are received and any pending events # cancelled. self._detach_from_event_loop() logger.debug('Closing Listener') # Removes socket file (if it exists) and calls socket.close(). self._listener.close() # Signal to consumers that the queue is drained. self._backlog.put_nowait(ListenerStopped()) except FileNotFoundError: # No problem if the socket file has already been removed. pass def _attach_to_event_loop(self): assert not self._attached, 'Must not be attached to attach' self._attached = True self._loop.add_reader(self._socket.fileno(), self._handle_readable) def _detach_from_event_loop(self): assert self._attached, 'Must be attached to detach' self._attached = False self._loop.remove_reader(self._socket.fileno()) def _handle_readable(self): try: self._backlog.put_nowait(self._listener.accept()) except multiprocessing.AuthenticationError: logger.warning('Authentication error') except OSError as e: # Listener raises its own error with no errno. if e.errno is not None: raise # EINVAL raised when socket is closed, expected condition. #if e.errno != errno.EINVAL: # raise self._handle_disconnect() def _handle_disconnect(self): self._detach_from_event_loop() self._accepting = False def reduce_textio(obj: TextIO): # Picklable object that contains a callback id to be used by the # receiving process. if obj.readable() == obj.writable(): raise ValueError( 'TextIO object must be either readable or writable, but not both.') df = DupFd(obj.fileno()) return rebuild_textio, (df, obj.readable(), obj.writable()) def rebuild_textio(df: DupFd, readable: bool, _writable: bool) -> TextIO: fd = df.detach() flags = 'r' if readable else 'w' return open(fd, flags) register(TextIOWrapper, reduce_textio) PK!uv, , quicken/_protocol.pyfrom __future__ import annotations import copy from dataclasses import dataclass import io import logging import os from pathlib import Path import sys from typing import Any, Dict, List # Registers TextIOWrapper handler. from . import _multiprocessing class RequestTypes: get_server_state = 'get_server_state' run_process = 'run_process' wait_process_done = 'wait_process_done' @dataclass class Request: name: str contents: Any @dataclass class Response: contents: Any @dataclass class StdStreams: stdin: io.TextIOWrapper stdout: io.TextIOWrapper stderr: io.TextIOWrapper @dataclass class ProcessState: std_streams: StdStreams cwd: Path umask: int environment: Dict[str, str] argv: List[str] @staticmethod def for_current_process() -> ProcessState: streams = StdStreams(sys.stdin, sys.stdout, sys.stderr) cwd = Path.cwd() # Only way to get umask is to set umask. umask = os.umask(0o077) os.umask(umask) environ = dict(os.environ) argv = list(sys.argv) return ProcessState(streams, cwd, umask, environ, argv) @staticmethod def _reset_loggers(stdout, stderr): def reset_handlers(logger): for h in logger.handlers: if isinstance(h, logging.StreamHandler): # Check if using stdout/stderr in underlying stream and call # setStream if so. # XXX: Use setStream in Python 3.7 if h.stream == sys.stdout: h.stream = stdout elif h.stream == sys.stderr: h.stream = stderr # For 'manager' property. # noinspection PyUnresolvedReferences loggers = logging.Logger.manager.loggerDict for _, item in loggers.items(): if isinstance(item, logging.PlaceHolder): # These don't have their own handlers. continue reset_handlers(item) reset_handlers(logging.getLogger()) @staticmethod def apply_to_current_process(state: ProcessState): streams = state.std_streams __class__._reset_loggers(streams.stdout, streams.stderr) sys.stdin, sys.stdout, sys.stderr = \ streams.stdin, streams.stdout, streams.stderr os.chdir(str(state.cwd)) os.umask(state.umask) os.environ = copy.deepcopy(state.environment) sys.argv = list(state.argv) @dataclass class ServerState: start_time: float pid: int context: Dict[str, str] PK!f ӉFFquicken/_server.py""" Command execution server process. | Server process | | forkserver | client server run ------- client -> server To allow use of any callable in the server we override the forkserver implementation and do not """ from abc import ABC, abstractmethod import asyncio from contextlib import ExitStack import errno import functools import json import logging import logging.config import multiprocessing import os from pathlib import Path import signal import socket import sys import time import traceback from typing import Callable, Dict, Optional import daemon import daemon.daemon import psutil from .__version__ import __version__ from ._asyncio import AsyncProcess, DeadlineTimer from ._multiprocessing import run_in_process, AsyncConnectionAdapter, \ AsyncListener, ConnectionClose, ListenerStopped from ._typing import NoneFunction from ._constants import socket_name, server_state_name from ._protocol import ProcessState, Request, RequestTypes, Response, ServerState from ._xdg import RuntimeDir logger = logging.getLogger(__name__) RequestCallbackT = Callable[[socket.socket], None] def run( socket_handler: RequestCallbackT, runtime_dir: RuntimeDir, server_idle_timeout: Optional[float] = None, log_file: Optional[Path] = None): """Start the server in the background. The function returns only when the server has successfully started. Args: socket_handler: function invoked on each request runtime_dir: directory for holding socket/pid file and used as the working directory for the server server_idle_timeout: timeout after which server will shutdown if no active requests log_file: used for server-side logging Raises: Same as `open` for log file issues If there are any issues starting the server errors are re-raised. """ logger.debug('Starting server launcher') daemon_options = { # This ensures that relative files are created in the context of the # actual runtime dir and not at the path that happens to exist at the # time. 'working_directory': runtime_dir.fileno(), # Keep runtime directory open. 'files_preserve': [runtime_dir.fileno()], } target = functools.partial(_run_server, socket_handler, server_idle_timeout) return run_in_process( daemonize, args=(target, daemon_options, log_file), allow_detach=True) def daemonize(detach, target, daemon_options: Dict, log_file=None): def patch_python_daemon(): # We don't want to close any open files right now since we # cannot distinguish between the ones we want to keep and # those we do not. For our use case there should not be # many opened files anyway. # XXX: If we do eventually want to close open files, keep in mind # that it may be slow and there are platform-specific speedups we # can do. def patched(**_): return daemon.daemon.close_all_open_files = patched patch_python_daemon() def detach_process_context(): """The default behavior in python-daemon is to let the parent die, but that doesn't work for us - the parent becomes the first client and should stay alive. """ # Make the process a process leader - signals sent to the parent group # will no longer propagate by default. os.setsid() # If detach_process is unspecified in the constructor then python-daemon # attempts to determine its value dynamically. This involves accessing stdin # which can fail if it has been overridden (as in unit tests). ctx = daemon.DaemonContext(detach_process=False) for k, v in daemon_options.items(): setattr(ctx, k, v) # We handle detaching. ctx.detach_process = False # Secure umask by default. ctx.umask = 0o077 detach_process_context() # Reset signal handlers. for signum in range(1, signal.NSIG): try: signal.signal(signum, signal.SIG_DFL) except OSError as e: if e.errno != errno.EINVAL: raise # Reset signal disposition. signal.pthread_sigmask(signal.SIG_SETMASK, set()) with ctx: if log_file: _configure_logging(log_file, loglevel='DEBUG') target(detach) def _run_server( callback: RequestCallbackT, server_idle_timeout: Optional[float], done) -> None: """Server server that provides sockets to `callback`. Method must be invoked when cwd is suitable for secure creation of files. Args: callback: the callback function invoked on each request server_idle_timeout: timeout after which the server will stop automatically done: callback function invoked after setup and before we start handling requests """ logger.debug('_run_server()') loop = asyncio.new_event_loop() loop.set_debug(True) def print_exception(_loop, context): exc = context['exception'] formatted_exc = ''.join( traceback.format_exception(type(exc), exc, exc.__traceback__)) logger.error( 'Error in event loop: %s\n%s', context['message'], formatted_exc) loop.set_exception_handler(print_exception) handler = ProcessConnectionHandler(callback, {}, loop=loop) def finish_loop(): logger.debug('Stopping loop') loop.stop() tasks = asyncio.all_tasks(loop) logger.debug('Number of pending tasks: %d', len(tasks)) loop.run_until_complete(asyncio.gather(*tasks)) logger.debug('Finished pending tasks') # socket_name is relative and we must already have cwd set to the # runtime_dir. server = Server( socket_name, handler, finish_loop, server_idle_timeout, loop=loop) def handle_sigterm(): logger.debug('Received SIGTERM') loop.create_task(server.stop()) loop.add_signal_handler(signal.SIGTERM, handle_sigterm) done() # For logging. multiprocessing.current_process().name = 'server' # For server state info. pid = os.getpid() process = psutil.Process(pid) server_state = { 'create_time': process.create_time(), 'version': __version__, 'pid': pid, } Path(server_state_name).write_text( json.dumps(server_state), encoding='utf-8') loop.create_task(server.serve()) loop.run_forever() logger.debug('Server finished.') class ConnectionHandler(ABC): @abstractmethod async def handle_connection(self, connection: AsyncConnectionAdapter): pass @abstractmethod async def handle_shutdown(self): pass class ProcessConnectionHandler(ConnectionHandler): def __init__(self, callback, context: Dict[str, str], loop=None): """ Args: callback: function to be executed in child process context: server execution context, pretty much a user info object """ if not loop: loop = asyncio.get_event_loop() self._loop = loop self._callback = callback self._context = context self._start_time = time.time() self._pid = os.getpid() self._connection_finish_cv = asyncio.Condition(loop=self._loop) self._num_active_connections = 0 async def handle_connection(self, connection: AsyncConnectionAdapter): self._num_active_connections += 1 process: AsyncProcess = None process_task: asyncio.Task = None queue = asyncio.Queue() async def handle_request(): nonlocal process, process_task logger.debug('Waiting for request') request = await queue.get() if request.name == RequestTypes.get_server_state: state = ServerState(self._start_time, self._pid, self._context) logger.debug('Sending server state') await connection.send(Response(state)) elif request.name == RequestTypes.run_process: process_state = request.contents process = self._start_callback(process_state) process_task = asyncio.create_task(process.wait()) pid = process.pid logger.debug('Running process in handler: %d', pid) await connection.send(Response(pid)) elif request.name == RequestTypes.wait_process_done: assert process is not None, \ 'Process must have been started' logger.debug('Waiting for process to exit') # We don't want the process.wait() task to be cancelled in case # our connection gets broken. exitcode = await asyncio.shield(process_task) logger.debug('Result: %d', exitcode) await connection.send(Response(exitcode)) return True async def accept_request(): try: request: Request = await connection.recv() except ConnectionClose: logger.debug( 'Connection closed (%d)', connection.connection.fileno()) except ConnectionResetError: logger.debug( 'Connection reset (%d)', connection.connection.fileno()) else: # We dispatch asynchronously so we can always notice connection # reset quickly. queue.put_nowait(request) return True # This occurs when we have disconnected from the client so cancel # any pending responses and kill the child process. logger.debug('Killing child process') if process: try: process.kill() except ProcessLookupError: # No problem, process already exited. pass logger.debug('Cancelling request handler') request_handler.cancel() async def loop(coro): while True: if not await coro(): break request_acceptor = asyncio.create_task(loop(accept_request)) request_handler = asyncio.create_task(loop(handle_request)) all_tasks = asyncio.gather(request_acceptor, request_handler) try: await all_tasks except asyncio.CancelledError: pass finally: logger.debug('Task cancelled or exception') all_tasks.cancel() if process_task: logger.debug('Waiting for child process to exit') logger.debug('Process task: %s', process_task) await process_task logger.debug( 'Done with connection (%d)', connection.connection.fileno()) self._num_active_connections -= 1 async with self._connection_finish_cv: self._connection_finish_cv.notify() def _start_callback(self, process_state) -> AsyncProcess: def setup_child(): ProcessState.apply_to_current_process(process_state) # Reset authkey since we remove it for server startup. multiprocessing.current_process().authkey = os.urandom(32) try: sys.exit(self._callback()) except SystemExit as e: # multiprocessing sets exitcode to 1 if `sys.exit` is called # with `None` or no arguments, so we re-map it here. # See https://bugs.python.org/issue35727. if e.code is None: e.args = (0,) e.code = 0 raise process = AsyncProcess(target=setup_child, loop=self._loop) process.start() return process async def handle_shutdown(self): """Shutdown executor""" logger.debug('Waiting for all connection handling to be done') # Wait for handling of all connections to be done. async with self._connection_finish_cv: await self._connection_finish_cv.wait_for( lambda: not self._num_active_connections) class Server: """A multiprocessing.Connection server.Server accepts new connections and dispatches handling of requests to the AsyncProcessExecutor. Per https://bugs.python.org/issue21998 asyncio is not fork-safe, so we spawn an executor prior to the starting of the event loop which has essentially the state that existed after the call to the cli factory. Not thread-safe. """ def __init__( self, socket_path, handler: ConnectionHandler, on_shutdown: NoneFunction, idle_timeout: Optional[int] = None, shutdown_ctx=None, loop=None): """ Args: socket_path: handler: Handler for received connections idle_timeout: shutdown_ctx: Context manager to be entered prior to server shutdown. loop: """ if not loop: loop = asyncio.get_event_loop() self._loop = loop self._listener = AsyncListener(socket_path, loop=self._loop) self._handler = handler self._idle_timeout = idle_timeout self._idle_timer = None self._shutdown_ctx = shutdown_ctx self._num_active_connections = 0 self._shutting_down = False self._shutdown_accept_cv = asyncio.Condition(loop=self._loop) self._on_shutdown = on_shutdown async def serve(self): while True: try: connection = await self._listener.accept() except ListenerStopped: if not self._shutting_down: logger.error('Listener has stopped') else: async with self._shutdown_accept_cv: self._shutdown_accept_cv.notify() return logger.debug( 'Accepted connection (%d)', connection.connection.fileno()) self._handle_connection(connection) async def stop(self): """Gracefully stop server, processing all pending connections. """ # Do server shutdown and pending event handling first. # Server shutdown should ensure: # 1. No accepted connections are unhandled # 2. All pending asynchronous functions have returned # 3. All cleanup by the handler is done. logger.debug('Server.stop()') self._shutting_down = True with ExitStack() as stack: if self._shutdown_ctx: stack.enter_context(self._shutdown_ctx) # Prevent timeout from occurring while we're shutting down. self._clear_idle_timer() # Closing the listener ensures there will be no more connections # queued. logger.debug('Waiting for listener close') await self._listener.close() logger.debug('Waiting for pending connections') async with self._shutdown_accept_cv: await self._shutdown_accept_cv.wait() logger.debug('Waiting for handler shutdown') await self._handler.handle_shutdown() logger.debug('Waiting for shutdown callback') # Finish everything off. self._on_shutdown() def _handle_connection(self, connection: AsyncConnectionAdapter): self._idle_handle_connect() async def wait_closed(): await connection.closed() self._idle_handle_close() self._loop.create_task(wait_closed()) self._loop.create_task( self._handler.handle_connection(connection)) def _handle_timeout(self): self._loop.create_task(self.stop()) def _set_idle_timer(self): if self._shutting_down: return if self._idle_timeout is None: return if self._idle_timer is not None: return self._idle_timer = DeadlineTimer(self._handle_timeout, self._loop) self._idle_timer.expires_from_now(self._idle_timeout) def _clear_idle_timer(self): if self._idle_timeout is None: return if self._idle_timer is None: return self._idle_timer.cancel() self._idle_timer = None def _idle_handle_connect(self): self._num_active_connections += 1 self._clear_idle_timer() def _idle_handle_close(self): self._num_active_connections -= 1 if not self._num_active_connections: self._set_idle_timer() def _configure_logging(logfile: Path, loglevel: str) -> None: class UTCFormatter(logging.Formatter): converter = time.gmtime logfile.parent.mkdir(parents=True, exist_ok=True) # TODO: Make fully configurable. logging.config.dictConfig({ 'version': 1, 'disable_existing_loggers': False, 'formatters': { f'{__name__}-formatter': { '()': UTCFormatter, 'format': '#### [{asctime}][{levelname}][{name}]' '[{process} ({processName})][{thread} ({threadName})]\n' ' {message}', 'style': '{', } }, 'handlers': { f'{__name__}-handler': { '()': 'logging.handlers.RotatingFileHandler', 'level': loglevel, 'filename': str(logfile), 'encoding': 'utf-8', 'formatter': f'{__name__}-formatter', 'maxBytes': 5_000_000, 'backupCount': 1, } }, 'loggers': { 'quicken': { 'level': loglevel, 'handlers': [f'{__name__}-handler'], }, 'asyncio': { 'level': loglevel, 'handlers': [f'{__name__}-handler'], }, }, }) logger.info('Server logging configured') PK!wg quicken/_signal.py"""Signal helpers. """ from contextlib import contextmanager import errno import os import signal from typing import Set def _settable_signal(sig) -> bool: """Check whether provided signal may be set. """ try: old = signal.signal(sig, lambda _num, _frame: ...) except OSError as e: assert e.errno == errno.EINVAL return False else: signal.signal(sig, old) return True signal_range = set(range(1, signal.NSIG)) # XXX: Can be signal.valid_signals() in 3.8+ settable_signals = set(filter(_settable_signal, signal_range)) forwarded_signals = settable_signals - { # We skip SIGCHLD because it interferes with tests that use multiprocessing. # We do not expect the client to receive the signal in any case anyway. signal.SIGCHLD, signal.SIGCLD, } @contextmanager def blocked_signals(signals: Set[signal.Signals]): old_mask = signal.pthread_sigmask(signal.SIG_BLOCK, signals) try: yield finally: signal.pthread_sigmask(signal.SIG_SETMASK, old_mask) class SignalProxy: """Implements behavior for proxying signals to another process: 1. All signals are sent to the target process except SIGCHLD, SIGT* 2. If SIGT* is received then we forward it and stop the current process. """ def set_target(self, pid): self._pid = pid self._install_handler() def _install_handler(self): for sig in forwarded_signals: signal.signal(sig, self._handle_signal) def _handle_signal(self, num, _frame): os.kill(self._pid, num) # The SIGT* functions are handled differently, stopping the current # process and the handler process if received. # * SIGTSTP is received usually in response to C-z on the terminal # * SIGTTIN is received when reading from stdin after being backgrounded # We want to behave 'as' the target process as much as possible, so when # one of these is received we stop ourselves so it will look natural in # a shell context if e.g. C-z is pressed. # XXX: signal.SIGTTOU is omitted since it is only conditional and we # don't check the condition. # XXX: For consistent experience, user CLI applications must stop when # receiving these signals - we may be able to relax this restriction # later if we can determine whether the remote process was stopped. if num in [signal.SIGTSTP, signal.SIGTTIN]: os.kill(os.getpid(), signal.SIGSTOP) PK!C@@quicken/_typing.pyfrom typing import Callable NoneFunction = Callable[[], None] PK!-equicken/_xdg.pyfrom contextlib import contextmanager, ExitStack from functools import wraps import os from pathlib import Path, PosixPath import stat import threading from typing import Any, ContextManager, Union @contextmanager def chdir(fd) -> ContextManager: """ Args: fd: anything suitable for passing to os.chdir(), or something with a `fileno` member. """ cwd = os.open('.', os.O_RDONLY) if hasattr(fd, 'fileno'): fd = fd.fileno() os.chdir(fd) try: yield finally: os.fchdir(cwd) os.close(cwd) @contextmanager def lock_guard(l: Union[threading.Lock, threading.RLock]): l.acquire() try: yield finally: l.release() class BoundPath(PosixPath): _lock = threading.RLock() def __init__(self, *_, dir_fd: int): self._dir_fd = dir_fd super().__init__() def __getattribute__(self, name: str) -> Any: """Intercept and execute all functions in the context of the directory. """ attr = super().__getattribute__(name) if callable(attr): @wraps(attr) def wrapper(*args, **kwargs): with ExitStack() as stack: stack.enter_context(lock_guard(self._lock)) try: stack.enter_context(chdir(self._dir_fd)) except AttributeError: # Avoids issues during Path construction, before # __init__ is called. pass return attr(*args, **kwargs) return wrapper return attr @property def dir(self): return self._dir_fd def pass_to(self, callback): return callback(self) class RuntimeDir: """Helper class to create/manage the application runtime directory. If a dir_path is not provided then attempts to find a suitable path: - $XDG_RUNTIME_DIR/{base_name} - $TMPDIR/{base_name}-{uid} - /tmp/{base_name}-{uid} otherwise `__init__` fails. The emphasis here is on securely creating the directory and ensuring its attributes, as well as providing an interface for operating on files in the directory without race conditions. If `os.*` unconditionally supported `dir_fd` we would suggest using that, but since this is not available on all platforms we instead use BoundPath, which does chdir to the directory before providing arguments as a relative path. """ def __init__(self, base_name: str = None, dir_path=None): """ Args: base_name: the name to use for the runtime directory within the temporary file location. dir_path: when provided, overrides the default temporary directory creation behavior. """ if dir_path is None: if base_name is None: raise ValueError( 'At least one of `base_name` or `dir_path` must be' ' provided.') dir_path = runtime_dir(base_name) self._path = dir_path # Open first. try: self._fd = os.open(dir_path, os.O_RDONLY) except FileNotFoundError: Path(dir_path).mkdir(mode=0o700) self._fd = os.open(dir_path, os.O_RDONLY) # Test after open to avoid toctou, also since we do not trust the mode # passed to mkdir. result = os.stat(self._fd) if not stat.S_ISDIR(result.st_mode): raise RuntimeError(f'{dir_path} must be a directory') if result.st_uid != os.getuid(): raise RuntimeError(f'{dir_path} must be owned by the current user') user_rwx = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR if stat.S_IMODE(result.st_mode) != user_rwx: raise RuntimeError(f'{dir_path} must have permissions 700') # At this point the directory referred to by self._fd is: # * owned by the user # * has permission 700 # unless explicitly changed by the user or root. def fileno(self) -> int: return self._fd def path(self, *args) -> BoundPath: """Execute action in directory so relative paths are resolved inside the directory without specific operations needing to support `dir_fd`. """ result = BoundPath(*args, dir_fd=self._fd) if result.is_absolute(): raise ValueError('Provided argument must not be absolute') return result def __str__(self): return self._path def runtime_dir(base_name): try: return f"{os.environ['XDG_RUNTIME_DIR']}/{base_name}" except KeyError: uid = os.getuid() base_name = f'{base_name}-{uid}' try: return f"{os.environ['TMPDIR']}/{base_name}" except KeyError: return f'/tmp/{base_name}' def cache_dir(base_name): try: return Path(os.environ['XDG_CACHE_HOME']) / base_name except KeyError: return Path(os.environ['HOME']) / '.cache' / base_name PK!_/XQQquicken-0.1.0.dist-info/LICENSEMIT License Copyright (c) 2018 Christopher Hunt [fullname] Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. PK!HڽTUquicken-0.1.0.dist-info/WHEEL A н#Z;/"d&F[xzw@Zpy3Fv]\fi4WZ^EgM_-]#0(q7PK!H3/M quicken-0.1.0.dist-info/METADATAUMs6Wl'ۭD+Ɍ43NI7 "W"*`Њ$vZ_,ݷo+Wx7D)7qqR]谛azKw6JT׾iPl6EYY]w.F7daV-&k GSnso3\ʜ^8+O.΁0|h>Uϕ|#x]렛Ƹ5hI/|ebg؁c:|' Zݶ5X.+&Oųd,JsH,-YIq68 $;i͜sI>&W`-M/CizkQğ T~z =Xc{;L->XC|Қ)Ӵ>$o_uǣZ;S󫻊[vpTJU"?<+_K5[i냭8m֜G.7W=C늫"z9'0}5;Ij|3RBC:{ҥ yx@,Y1쥀#"p˟`UZH&}C5ʄlMô-$E)r@5Ғ \JŤwԵŽJp+9ƙXs,S-6<\uHQޛ!,!`s']_\ !5AըAT@F{T&n $H-sXx $).QV*z0kM+Cpի%,(AN87 :q-syjU ] 4\8q \S/ח O54ڞt,eԊUWѼ=-\{ Шz_a~-fF~\Y˛EW/tK}r,E~Lohʳ_ݷL0LnѯسXCvFP)~HўA q7[RW&Jҷ wUqEE1+d{$fg8PK!Hʥdquicken-0.1.0.dist-info/RECORDuz@}? @A@&A 2H CO" =nIq q.* )x~'򆪧Po\}I $s?6mBu1h7̰nTxjF^;) Rlien 0_-0JrMĤ̓<ݱ˞p`FFA͗T`o#3^sԠ!뭢(yr3mnqaAӴ$|2ֳ/8m߶99eyq=%[.M7p,#"|09, VʷGdDpfr.3睵烒TOPaM!lkvYz (kt4J.#~ofȯ(~MT+& cr}^Tȃ$Ǯ} 6UA O_ח{(Mk5o\ 9d}Wg3Ne9Vu3 Kq5Sjٳ'i-5dԨ5lA{.&mV~NQ,C;Xm$g;*%QEWtE>\O'XYudqM,GI7-oة?,@J*Qg;^ꑛuU>1;?%UcuFUi掙KBٴ@V@de.[ɖdtbƹ力#VY6\_!_PK!0QQquicken/__init__.pyPK! qquicken/__version__.pyPK!<  quicken/_asyncio.pyPK!P++ quicken/_cli.pyPK!=09quicken/_client.pyPK!kxw|88:quicken/_constants.pyPK!9ɣYYN;quicken/_debug.pyPK!7+7+Bquicken/_multiprocessing.pyPK!uv, , Fnquicken/_protocol.pyPK!f ӉFFxquicken/_server.pyPK!wg ]quicken/_signal.pyPK!C@@rquicken/_typing.pyPK!-equicken/_xdg.pyPK!_/XQQquicken-0.1.0.dist-info/LICENSEPK!HڽTUquicken-0.1.0.dist-info/WHEELPK!H3/M quicken-0.1.0.dist-info/METADATAPK!Hʥdquicken-0.1.0.dist-info/RECORDPK