PK! ;#//epicbox/__init__.pyfrom .config import * from .sandboxes import * PK!OsmU  epicbox/config.pyimport tempfile import structlog import structlog._config __all__ = ['Profile', 'configure'] IS_CONFIGURED = False PROFILES = {} DOCKER_URL = None DOCKER_TIMEOUT = 30 DOCKER_MAX_TOTAL_RETRIES = 9 DOCKER_MAX_CONNECT_RETRIES = 5 DOCKER_MAX_READ_RETRIES = 5 DOCKER_BACKOFF_FACTOR = 0.2 DOCKER_WORKDIR = '/sandbox' DEFAULT_LIMITS = { # CPU time in seconds, None for unlimited 'cputime': 1, # Real time in seconds, None for unlimited 'realtime': 5, # Memory in megabytes, None for unlimited 'memory': 64, # Allow user process to fork #'canfork': False, # Limiting the maximum number of user processes in Linux is tricky. # http://unix.stackexchange.com/questions/55319/are-limits-conf-values-applied-on-a-per-process-basis } DEFAULT_USER = 'root' CPU_TO_REAL_TIME_FACTOR = 5 class Profile(object): def __init__(self, name, docker_image, command=None, user=DEFAULT_USER, read_only=False, network_disabled=True): self.name = name self.docker_image = docker_image self.command = command self.user = user self.read_only = read_only self.network_disabled = network_disabled def configure(profiles=None, docker_url=None, base_workdir=None): global IS_CONFIGURED, PROFILES, DOCKER_URL IS_CONFIGURED = True if isinstance(profiles, dict): profiles_map = {name: Profile(name, **profile_kwargs) for name, profile_kwargs in profiles.items()} else: profiles_map = {profile.name: profile for profile in profiles or []} PROFILES.update(profiles_map) DOCKER_URL = docker_url if not structlog._config._CONFIG.is_configured: structlog.configure( processors=[ structlog.stdlib.filter_by_level, structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, structlog.stdlib.PositionalArgumentsFormatter(), structlog.processors.TimeStamper(fmt='iso'), structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.processors.KeyValueRenderer(key_order=['event']), ], logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) PK!brepicbox/exceptions.pyclass EpicBoxError(Exception): """The base class for custom exceptions raised by epicbox.""" class DockerError(EpicBoxError): """An error occurred with the underlying docker system.""" PK! &}.}.epicbox/sandboxes.pyimport io import tarfile import time import uuid from contextlib import contextmanager import structlog from docker.errors import APIError, DockerException, NotFound from requests.exceptions import RequestException from . import config, exceptions, utils __all__ = ['create', 'start', 'destroy', 'run', 'working_directory'] logger = structlog.get_logger() _SANDBOX_NAME_PREFIX = 'epicbox-' class _SandboxContext(dict): """A context manager wrapper for a sandbox container that destroys it upon completion of the block.""" def __enter__(self): return self def __exit__(self, *args): destroy(self) def create(profile_name, command=None, files=None, limits=None, workdir=None): """Create a new sandbox container without starting it. :param str profile_name: One of configured profile names. :param str command: A command with args to run in the sandbox container. :param list files: A list of `{'name': 'filename', 'content': b'data'}` dicts which define files to be written to the working directory of the sandbox. :param dict limits: Specify time and memory limits for the sandboxed process. It overrides the default limits from `config.DEFAULT_LIMITS`. :param workdir: A working directory created using `working_directory` context manager. :return dict: A sandbox object. :raises DockerError: If an error occurred with the underlying docker system. """ if profile_name not in config.PROFILES: raise ValueError("Profile not found: {0}".format(profile_name)) if workdir is not None and not isinstance(workdir, _WorkingDirectory): raise ValueError("Invalid 'workdir', it should be created using " "'working_directory' context manager") profile = config.PROFILES[profile_name] command = command or profile.command or 'true' command_list = ['/bin/sh', '-c', command] limits = utils.merge_limits_defaults(limits) c = _create_sandbox_container(profile.docker_image, command_list, limits, workdir=workdir, user=profile.user, read_only=profile.read_only, network_disabled=profile.network_disabled) if workdir and not workdir.node: node_name = utils.inspect_container_node(c) if node_name: # Assign a Swarm node name to the working directory to run # subsequent containers on this same node. workdir.node = node_name logger.info("Assigned Swarm node to the working directory", workdir=workdir) if files: _write_files(c, files) sandbox = _SandboxContext(c) # Store the realtime limit in the sandbox structure to access it later # in `start` function. sandbox['RealtimeLimit'] = limits['realtime'] logger.info("Sandbox prepared and ready to start", sandbox=sandbox) return sandbox def _create_sandbox_container(image, command, limits, workdir=None, user=None, read_only=False, network_disabled=True): sandbox_id = str(uuid.uuid4()) name = _SANDBOX_NAME_PREFIX + sandbox_id mem_limit = str(limits['memory']) + 'm' binds = { workdir.volume: { 'bind': config.DOCKER_WORKDIR, 'ro': False, } } if workdir else None ulimits = utils.create_ulimits(limits) docker_client = utils.get_docker_client() host_config = docker_client.create_host_config(binds=binds, read_only=read_only, mem_limit=mem_limit, memswap_limit=mem_limit, ulimits=ulimits, log_config={'type': 'none'}) environment = None if workdir and workdir.node: # Add constraint to run a container on the Swarm node that # ran the first container with this working directory. environment = ['constraint:node==' + workdir.node] log = logger.bind(sandbox_id=sandbox_id) log.info("Creating a new sandbox container", name=name, image=image, command=command, limits=limits, workdir=workdir, user=user, read_only=read_only, network_disabled=network_disabled) try: c = docker_client.create_container(image, command=command, user=user, stdin_open=True, environment=environment, network_disabled=network_disabled, name=name, working_dir=config.DOCKER_WORKDIR, host_config=host_config) except (RequestException, DockerException) as e: if isinstance(e, APIError) and e.response.status_code == 409: # This may happen because of retries, it's a recoverable error log.info("The container with the given name is already created", name=name) c = {'Id': name} else: log.exception("Failed to create a sandbox container") raise exceptions.DockerError(str(e)) log.info("Sandbox container created", container=c) return c def start(sandbox, stdin=None): """Start a created sandbox container and wait for it to terminate. :param sandbox: A sandbox to start. :param bytes stdin: The data to be sent to the standard input of the sandbox, or `None`, if no data should be sent. :return dict: A result structure containing the exit code of the sandbox, its stdout and stderr output, duration of execution, etc. :raises DockerError: If an error occurred with the underlying docker system. """ if stdin: if not isinstance(stdin, (bytes, str)): raise TypeError("'stdin' must be bytes or str") if isinstance(stdin, str): stdin = stdin.encode() realtime_limit = sandbox.get('RealtimeLimit') log = logger.bind(sandbox=sandbox) log.info("Starting the sandbox container", stdin_size=len(stdin or '')) result = { 'exit_code': None, 'stdout': b'', 'stderr': b'', 'duration': None, 'timeout': False, 'oom_killed': False, } try: stdout, stderr = utils.docker_communicate(sandbox, stdin=stdin, timeout=realtime_limit) except TimeoutError: log.info("Sandbox realtime limit exceeded", realtime=realtime_limit) result['timeout'] = True except (RequestException, DockerException, OSError) as e: log.exception("Sandbox runtime error") raise exceptions.DockerError(str(e)) else: log.info("Sandbox container exited") state = utils.inspect_container_state(sandbox) result.update(stdout=stdout, stderr=stderr, **state) if (utils.is_killed_by_sigkill_or_sigxcpu(state['exit_code']) and not state['oom_killed']): # SIGKILL/SIGXCPU is sent but not by out of memory killer result['timeout'] = True log.info("Sandbox run result", result=utils.truncate_result(result)) return result def destroy(sandbox): """Destroy a sandbox container. Kill a running sandbox before removal. Remove the volumes auto-created and associated with the sandbox container. :param sandbox: A sandbox to destroy. """ docker_client = utils.get_docker_client() try: docker_client.remove_container(sandbox, v=True, force=True) except (RequestException, DockerException): logger.exception("Failed to destroy the sandbox container", sandbox=sandbox) else: logger.info("Sandbox container destroyed", sandbox=sandbox) def run(profile_name, command=None, files=None, stdin=None, limits=None, workdir=None): """Run a command in a new sandbox container and wait for it to finish running. Destroy the sandbox when it has finished running. The arguments to this function is a combination of arguments passed to `create` and `start` functions. :return dict: Same as for `start`. :raises DockerError: If an error occurred with the underlying docker system. """ with create(profile_name, command=command, files=files, limits=limits, workdir=workdir) as sandbox: return start(sandbox, stdin=stdin) class _WorkingDirectory(object): """Represent a Docker volume used as a working directory. Not intended to be instantiated by yourself. """ def __init__(self, volume, node=None): self.volume = volume self.node = node def __repr__(self): return "WorkingDirectory(volume={!r}, node={!r})".format(self.volume, self.node) @contextmanager def working_directory(): docker_client = utils.get_docker_client() volume_name = 'epicbox-' + str(uuid.uuid4()) log = logger.bind(volume=volume_name) log.info("Creating new docker volume for working directory") try: docker_client.create_volume(volume_name) except (RequestException, DockerException) as e: log.exception("Failed to create a docker volume") raise exceptions.DockerError(str(e)) log.info("New docker volume is created") try: yield _WorkingDirectory(volume=volume_name, node=None) finally: # Ensure that volume cleanup takes place log.info("Removing the docker volume") try: docker_client.remove_volume(volume_name) except NotFound: log.warning("Failed to remove the docker volume, it doesn't exist") except (RequestException, DockerException) as e: log.exception("Failed to remove the docker volume") else: log.info("Docker volume removed") def _write_files(container, files): """Write files to the working directory in the given container.""" # Retry on 'No such container' since it may happen when the function # is called immediately after the container is created. # Retry on 500 Server Error when untar cannot allocate memory. docker_client = utils.get_docker_client(retry_status_forcelist=(404, 500)) log = logger.bind(files=utils.filter_filenames(files), container=container) log.info("Writing files to the working directory in container") mtime = int(time.time()) files_written = [] tarball_fileobj = io.BytesIO() with tarfile.open(fileobj=tarball_fileobj, mode='w') as tarball: for file in files: if not file.get('name') or not isinstance(file['name'], str): continue content = file.get('content', b'') file_info = tarfile.TarInfo(name=file['name']) file_info.size = len(content) file_info.mtime = mtime tarball.addfile(file_info, fileobj=io.BytesIO(content)) files_written.append(file['name']) try: docker_client.put_archive(container, config.DOCKER_WORKDIR, tarball_fileobj.getvalue()) except (RequestException, DockerException) as e: log.exception("Failed to extract an archive of files to the working " "directory in container") raise exceptions.DockerError(str(e)) log.info("Successfully written files to the working directory", files_written=files_written) PK! /}((epicbox/utils.pyimport errno import os import select import signal import socket import struct import time import dateutil.parser import docker import structlog from docker import constants as docker_consts from docker.errors import DockerException from docker.utils import Ulimit from requests.adapters import HTTPAdapter from requests.exceptions import RequestException from requests.packages.urllib3.util.retry import Retry from . import config, exceptions logger = structlog.get_logger() _DOCKER_CLIENTS = {} #: Recoverable IO/OS Errors. ERRNO_RECOVERABLE = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK) def get_docker_client(base_url=None, retry_read=config.DOCKER_MAX_READ_RETRIES, retry_status_forcelist=(500,)): client_key = (retry_read, retry_status_forcelist) if client_key not in _DOCKER_CLIENTS: client = docker.Client(base_url=base_url or config.DOCKER_URL, timeout=config.DOCKER_TIMEOUT) retries = Retry(total=config.DOCKER_MAX_TOTAL_RETRIES, connect=config.DOCKER_MAX_CONNECT_RETRIES, read=retry_read, method_whitelist=False, status_forcelist=retry_status_forcelist, backoff_factor=config.DOCKER_BACKOFF_FACTOR, raise_on_status=False) http_adapter = HTTPAdapter(max_retries=retries) client.mount('http://', http_adapter) _DOCKER_CLIENTS[client_key] = client return _DOCKER_CLIENTS[client_key] def inspect_container_node(container): # 404 No such container may be returned when TimeoutError occurs # on container creation. docker_client = get_docker_client(retry_status_forcelist=(404, 500)) try: container_info = docker_client.inspect_container(container) except (RequestException, DockerException) as e: logger.exception("Failed to inspect the container", container=container) raise exceptions.DockerError(str(e)) if 'Node' not in container_info: # Remote Docker side is not a Docker Swarm cluster return None return container_info['Node']['Name'] def inspect_container_state(container): docker_client = get_docker_client() try: container_info = docker_client.inspect_container(container) except (RequestException, DockerException) as e: logger.exception("Failed to inspect the container", container=container) raise exceptions.DockerError(str(e)) started_at = dateutil.parser.parse(container_info['State']['StartedAt']) finished_at = dateutil.parser.parse(container_info['State']['FinishedAt']) duration = finished_at - started_at duration_seconds = duration.total_seconds() if duration_seconds < 0: duration_seconds = -1 return { 'exit_code': container_info['State']['ExitCode'], 'duration': duration_seconds, 'oom_killed': container_info['State'].get('OOMKilled', False), } def demultiplex_docker_stream(data): """ Demultiplex the raw docker stream into separate stdout and stderr streams. Docker multiplexes streams together when there is no PTY attached, by sending an 8-byte header, followed by a chunk of data. The first 4 bytes of the header denote the stream from which the data came (i.e. 0x01 = stdout, 0x02 = stderr). Only the first byte of these initial 4 bytes is used. The next 4 bytes indicate the length of the following chunk of data as an integer in big endian format. This much data must be consumed before the next 8-byte header is read. Docs: https://docs.docker.com/engine/api/v1.24/#attach-to-a-container :param bytes data: A raw stream data. :return: A tuple `(stdout, stderr)` of bytes objects. """ data_length = len(data) stdout_chunks = [] stderr_chunks = [] walker = 0 while data_length - walker >= 8: header = data[walker:walker + docker_consts.STREAM_HEADER_SIZE_BYTES] stream_type, length = struct.unpack_from('>BxxxL', header) start = walker + docker_consts.STREAM_HEADER_SIZE_BYTES end = start + length walker = end if stream_type == 1: stdout_chunks.append(data[start:end]) elif stream_type == 2: stderr_chunks.append(data[start:end]) return b''.join(stdout_chunks), b''.join(stderr_chunks) def _socket_read(sock, n=4096): """ Read at most `n` bytes of data from the `sock` socket. :return: A bytes object or `None` at end of stream. """ try: data = os.read(sock.fileno(), n) except EnvironmentError as e: if e.errno in ERRNO_RECOVERABLE: return b'' raise e if data: return data def _socket_write(sock, data): """ Write as much data from the `data` buffer to the `sock` socket as possible. :return: The number of bytes sent. """ try: return os.write(sock.fileno(), data) except EnvironmentError as e: if e.errno in ERRNO_RECOVERABLE: return 0 raise e def docker_communicate(container, stdin=None, start_container=True, timeout=None): """ Interact with the container: Start it if required. Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. :param container: A container to interact with. :param bytes stdin: The data to be sent to the standard input of the container, or `None`, if no data should be sent. :param bool start_container: Whether to start the container after attaching to it. :param int timeout: Time in seconds to wait for the container to terminate, or `None` to make it unlimited. :return: A tuple `(stdout, stderr)` of bytes objects. :raise TimeoutError: If the container does not terminate after `timeout` seconds. The container is not killed automatically. :raise RequestException, DockerException, OSError: If an error occurred with the underlying docker system. """ # Retry on 'No such container' since it may happen when the attach/start # is called immediately after the container is created. docker_client = get_docker_client(retry_status_forcelist=(404, 500)) log = logger.bind(container=container) params = { # Attach to stdin even if there is nothing to send to it to be able # to properly close it (stdin of the container is always open). 'stdin': 1, 'stdout': 1, 'stderr': 1, 'stream': 1, 'logs': 0, } sock = docker_client.attach_socket(container, params=params) sock._sock.setblocking(False) # Make socket non-blocking log.info("Attached to the container", params=params, fd=sock.fileno(), timeout=timeout) if not stdin: log.debug("There is no input data. Shut down the write half " "of the socket.") sock._sock.shutdown(socket.SHUT_WR) if start_container: docker_client.start(container) log.info("Container started") stream_data = b'' start_time = time.time() while timeout is None or time.time() - start_time < timeout: read_ready, write_ready, _ = select.select([sock], [sock], [], 1) is_io_active = False if read_ready: is_io_active = True try: data = _socket_read(sock) except ConnectionResetError: log.warning("Connection reset caught on reading the container " "output stream. Break communication") break if data is None: log.debug("Container output reached EOF. Closing the socket") break stream_data += data if write_ready and stdin: is_io_active = True try: written = _socket_write(sock, stdin) except BrokenPipeError: # Broken pipe may happen when a container terminates quickly # (e.g. OOM Killer) and docker manages to close the socket # almost immediately before we're trying to write to stdin. log.warning("Broken pipe caught on writing to stdin. Break " "communication") break stdin = stdin[written:] if not stdin: log.debug("All input data has been sent. Shut down the write " "half of the socket.") sock._sock.shutdown(socket.SHUT_WR) if not is_io_active: # Save CPU time time.sleep(0.05) else: sock.close() raise TimeoutError("Container didn't terminate after timeout seconds") sock.close() return demultiplex_docker_stream(stream_data) def filter_filenames(files): return [file['name'] for file in files if 'name' in file] def merge_limits_defaults(limits): if not limits: return config.DEFAULT_LIMITS is_realtime_specified = 'realtime' in limits for limit_name, default_value in config.DEFAULT_LIMITS.items(): if limit_name not in limits: limits[limit_name] = default_value if not is_realtime_specified: limits['realtime'] = limits['cputime'] * config.CPU_TO_REAL_TIME_FACTOR return limits def create_ulimits(limits): ulimits = [] if limits['cputime']: cpu = limits['cputime'] ulimits.append(Ulimit(name='cpu', soft=cpu, hard=cpu)) if 'file_size' in limits: fsize = limits['file_size'] ulimits.append(Ulimit(name='fsize', soft=fsize, hard=fsize)) return ulimits or None def truncate_result(result): MAX_OUTPUT_LENGTH = 100 truncated = {} for k, v in result.items(): if k in ['stdout', 'stderr']: if len(v) > MAX_OUTPUT_LENGTH: v = v[:MAX_OUTPUT_LENGTH] + b' *** truncated ***' truncated[k] = v return truncated def is_killed_by_sigkill_or_sigxcpu(status): return status - 128 in [signal.SIGKILL, signal.SIGXCPU] PK!Ox300epicbox-0.6.2.dist-info/LICENSEMIT License Copyright (c) 2018 Pavel Sviderski Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. PK!H\TTepicbox-0.6.2.dist-info/WHEEL 1 0 нR \I$ơ7.ZON `h6oi14m,b4>4ɛpK>X;baP>PK!Hf epicbox-0.6.2.dist-info/METADATAXrۺ}We] O694=әGHHƘ$X߻6HJ%=}l6eI: '*|gʐ\E ^/i*6_3u2摎%W2*tt# _wVd1,IS 毝m뭔.Aѝfի"Ytv!mbv]k U oTޕPIsWKڬBi[lBt`y; i"UK%a!?yn1ާ#FHSG K˛ѳOH; {3Ps7d0e[Op:!X+ۍ!!^g8i?lu-wpÕՉӒ6MP4gyAGN©H$ɖ(oʨdG{cƒ}Mٌ‚\gT*cs,F]\/hWrM2'{tDp| pٲ|-NRC6qw-tnE| Rgw>aIo=virzXwmV.@X|j@ s)P[r>|2EJA{6 dB{YH4 F!=o˷ی IejN/vx*Sm+{S?|NԞ tFCPs$BxII1py6!^IW$964EYksjڸY]\T+q ^~ViGk+fV y *lTVܨs[>NE-î=kCDb(輴;kQoFԃ[%|\ #t&菆Ó״Lgu:QI"z{T l";|^efN!9Nj2F ^rr) Se' ݸn=Ȏౌ ǴyĬ@Σ:!uRfK%A U8pOkUͪ<whN<ȥ]sKjT 5Zj8(u T}SXd>@nP3cKVЮD~W]Ag:KW@ b@.G4g'7u):d4+L2n(*a LD/^`^@e ^U;Ơ2^UiAtWa{םup}G|A*u+B#@QN) 1Eh>0C[dY*Un7戛"(DXC'0@CMal;B9Q:)R Բs)RhK!k W&3 +\wA= Q}zKэ.Z wk|{P~e" =D5n ߪD?^qU齺(7'6xxN?F^<9Gx3lߐh\FXlw :eߠBGJVQGUM}ZF7]Kw9x'PmncɵU^C2ؚ徧~a2_eQhy2pod d#~uE'Dɋ/M+D+Epzzo_/>șPK!HlAepicbox-0.6.2.dist-info/RECORDuй@἟*@`i6PR8Ocbtdi7}& i MegȮ_)_1;lssknUËumA'mQ&vnW,bDn5ǮA Sx46fH[s|EW|&9;Rj.Zr{Aޡ3,rU0ji3_PK! ;#//epicbox/__init__.pyPK!OsmU  `epicbox/config.pyPK!br epicbox/exceptions.pyPK! &}.}. epicbox/sandboxes.pyPK! /}((O9epicbox/utils.pyPK!Ox300aepicbox-0.6.2.dist-info/LICENSEPK!H\TTeepicbox-0.6.2.dist-info/WHEELPK!Hf ~fepicbox-0.6.2.dist-info/METADATAPK!HlA"nepicbox-0.6.2.dist-info/RECORDPK up