==== reprobench/__init__.py ====

name = "reprobench"
VERSION = "0.1.0"


==== reprobench/console/main.py ====

#!/usr/bin/env python
import os.path
import sys

import click
import strictyaml
from loguru import logger

from reprobench.core.schema import schema
from reprobench.runners import LocalRunner, SlurmRunner


@click.group()
@click.version_option(version="0.1.0")
@click.option("--verbose", "-v", "verbosity", count=True, default=0, help="Verbosity")
def cli(verbosity):
    logger.remove()
    if verbosity == 0:
        logger.add(sys.stderr, level="ERROR")
    elif verbosity == 1:
        logger.add(sys.stderr, level="WARNING")
    elif verbosity == 2:
        logger.add(sys.stderr, level="INFO")
    elif verbosity == 3:
        logger.add(sys.stderr, level="DEBUG")
    elif verbosity >= 4:
        logger.add(sys.stderr, level="TRACE")


@cli.group()
def run():
    pass


@run.command("local")
@click.option(
    "-o",
    "--output-dir",
    type=click.Path(file_okay=False, writable=True, resolve_path=True),
    default="./output",
    required=True,
    show_default=True,
)
@click.option("-r", "--resume", is_flag=True)
@click.argument("config", type=click.File("r"))
def local_runner(output_dir, resume, config):
    config_text = config.read()
    config = strictyaml.load(config_text, schema=schema).data
    runner = LocalRunner(config, output_dir, resume)
    runner.run()


@run.command("slurm")
@click.option(
    "-o",
    "--output-dir",
    type=click.Path(file_okay=False, writable=True, resolve_path=True),
    default="./output",
    required=True,
    show_default=True,
)
@click.option("-r", "--resume", is_flag=True)
@click.option("-t", "--teardown", is_flag=True)
@click.option("-p", "--python-path", required=True)
@click.argument("config", type=click.File("r"))
def slurm_runner(output_dir, resume, teardown, python_path, config):
    config_path = os.path.realpath(config.name)
    config_text = config.read()
    config = strictyaml.load(config_text, schema=schema).data
    runner = SlurmRunner(
        config=config,
        config_path=config_path,
        output_dir=output_dir,
        resume=resume,
        teardown=teardown,
        python_path=python_path,
    )
    runner.run()


if __name__ == "__main__":
    cli()


==== reprobench/core/bases.py ====

class Runner:
    def __init__(self, config):
        self.config = config

    def run(self):
        pass


class Step:
    def run(self, context):
        pass


class Tool:
    name = "Base Tool"
    REQUIRED_PATHS = []

    def setup(self):
        pass

    def version(self):
        return "1.0.0"

    def pre_run(self, context):
        pass

    def cmdline(self, context):
        pass

    def post_run(self, context):
        pass

    def teardown(self):
        pass


==== reprobench/core/db.py ====

from pathlib import Path
from datetime import datetime

from peewee import Proxy, Model
from playhouse.apsw_ext import (
    DateTimeField,
    CharField,
    ForeignKeyField,
    IntegerField,
    BooleanField,
)

db = Proxy()


class BaseModel(Model):
    created_at = DateTimeField(default=datetime.now)

    class Meta:
        database = db


class Limit(BaseModel):
    type = CharField(max_length=32, unique=True)
    value = CharField()


class TaskCategory(BaseModel):
    title = CharField()


class Task(BaseModel):
    category = ForeignKeyField(TaskCategory, backref="tasks", on_delete="cascade")
    path = CharField()


class Tool(BaseModel):
    module = CharField(unique=True)
    name = CharField()
    version = CharField(null=True)


class ParameterCategory(BaseModel):
    title = CharField()


class Parameter(BaseModel):
    category = ForeignKeyField(
        ParameterCategory, backref="parameters", on_delete="cascade"
    )
    key = CharField()
    value = CharField()

    class Meta:
        indexes = ((("category", "key"), True),)


class Run(BaseModel):
    FAILED = -2
    CANCELED = -1
    PENDING = 0
    SUBMITTED = 1
    RUNNING = 2
    DONE = 3
    STATUS_CHOICES = (
        (FAILED, "Failed"),
        (CANCELED, "Canceled"),
        (PENDING, "Pending"),
        (SUBMITTED, "Submitted"),
        (RUNNING, "Running"),
        (DONE, "Done"),
    )

    TIMEOUT = "TLE"
    MEMOUT = "MEM"
    RUNTIME_ERR = "RTE"
    OUTPUT_LIMIT = "OLE"
    SUCCESS = "OK"
    VERDICT_CHOICES = (
        (TIMEOUT, "Time Limit Exceeded"),
        (MEMOUT, "Memory Limit Exceeded"),
        (RUNTIME_ERR, "Runtime Error"),
        (OUTPUT_LIMIT, "Output Limit Exceeded"),
        (SUCCESS, "Run Successfully"),
    )

    tool = ForeignKeyField(Tool, backref="runs", on_delete="cascade")
    parameter_category = ForeignKeyField(
        ParameterCategory, backref="runs", on_delete="cascade"
    )
    task = ForeignKeyField(Task, backref="runs", on_delete="cascade")
    status = IntegerField(choices=STATUS_CHOICES, default=PENDING)
    verdict = CharField(choices=VERDICT_CHOICES, max_length=3, null=True)
    directory = CharField(null=True)
    valid = BooleanField(null=True)
    return_signal = IntegerField(null=True)
    return_code = IntegerField(null=True)

    class Meta:
        only_save_dirty = True


class RunStatistic(BaseModel):
    CPU_TIME = "cpu"
    WALL_TIME = "wall"
    MEM_USAGE = "mem"
    KEY_CHOICES = (
        (CPU_TIME, "CPU Time (s)"),
        (WALL_TIME, "Wall Clock Time (s)"),
        (MEM_USAGE, "Max Memory Usage (KiB)"),
    )

    run = ForeignKeyField(Run, backref="statistics", on_delete="cascade")
    key = CharField(choices=KEY_CHOICES)
    value = CharField()


MODELS = [
    Limit,
    TaskCategory,
    Task,
    ParameterCategory,
    Parameter,
    Run,
    RunStatistic,
    Tool,
]


def db_bootstrap(config):
    db.connect()
    db.create_tables(MODELS)

    Limit.insert_many(
        [{"type": key, "value": value} for (key, value) in config["limits"].items()]
    ).execute()

    Tool.insert_many(
        [{"name": name, "module": module} for (name, module) in config["tools"].items()]
    ).execute()

    for (category, parameters) in config["parameters"].items():
        parameter_category = ParameterCategory.create(title=category)
        for (key, value) in parameters.items():
            Parameter.create(category=parameter_category, key=key, value=value)

    for (category, task) in config["tasks"].items():
        task_category = TaskCategory.create(title=category)
        # only the "folder" task type is supported for now
        assert task["type"] == "folder"
        for file in Path().glob(task["path"]):
            Task.create(category=task_category, path=str(file))


==== reprobench/core/exceptions.py ====

class ExecutableNotFoundError(RuntimeError):
    pass


==== reprobench/core/schema.py ====

from strictyaml import Map, Regex, Seq, Str, Int, Optional, MapPattern, Enum

limits_schema = Map(
    {
        "time": Int(),
        Optional("memory", default=8192): Int(),
        Optional("output"): Int(),
        Optional("cores"): Str(),
    }
)

module_schema = Regex(r"\.?\w+(\.\w+)*")

step_schema = Seq(Map({"step": module_schema}))

schema = Map(
    {
        "title": Str(),
        Optional("description"): Str(),
        "limits": limits_schema,
        "steps": Map({"run": step_schema, Optional("compile"): step_schema}),
        "tasks": MapPattern(
            Str(), Map({"type": Enum(["folder"]), Optional("path"): Str()})
        ),
        "tools": MapPattern(Str(), module_schema),
        "parameters": MapPattern(Str(), MapPattern(Str(), Str())),
    }
)
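For orientation, here is a minimal sketch of a configuration that should validate against this schema. The my_tools.minisat.MiniSatTool module path, the task glob, and the parameter values are made-up placeholders; only reprobench.executors.PsmonExecutor refers to a class that actually exists in the package.

import strictyaml

from reprobench.core.schema import schema

# Hypothetical benchmark configuration; tool module and task glob are illustrative.
example_config = """\
title: Example benchmark
limits:
  time: 60
  memory: 4096
steps:
  run:
    - step: reprobench.executors.PsmonExecutor
tasks:
  satlib:
    type: folder
    path: tasks/satlib/*.cnf
tools:
  minisat: my_tools.minisat.MiniSatTool
parameters:
  default:
    seed: "1"
"""

config = strictyaml.load(example_config, schema=schema).data
print(config["title"], config["limits"]["time"])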
==== reprobench/executors/__init__.py ====

from .runsolver import RunsolverExecutor
from .pynisher import PynisherExecutor
from .psmon import PsmonExecutor


==== reprobench/executors/isolate.py ====

import subprocess
import functools
import operator

from reprobench.core.bases import Step
from reprobench.utils import find_executable, silent_run


# Not working yet
class IsolateExecutor(Step):
    def __init__(self):
        self.executable = find_executable("isolate")

    def run(self, context):
        tool = context["tool"]
        tool.pre_run(context["task"], context["parameters"])
        output = subprocess.check_output(
            [self.executable, "--run", "-e"]
            + functools.reduce(
                operator.iconcat,
                [["-d", f"{path}:rw"] for path in tool.REQUIRED_PATHS],
            )
            + ["--"]
            + tool.cmdline(context["task"], context["parameters"])
        )
        return output


==== reprobench/executors/psmon.py ====

import subprocess
from pathlib import Path

from loguru import logger
from psmon.main import run

from reprobench.core.bases import Step
from reprobench.core.db import db, Run, RunStatistic


class PsmonExecutor(Step):
    def run(self, context):
        tool = context["tool"]
        limits = context["limits"]
        tool.pre_run(context)
        cwd = context["run"].directory
        out_file = (Path(cwd) / "run.out").open("wb")
        err_file = (Path(cwd) / "run.err").open("wb")

        context["run"].status = Run.RUNNING
        context["run"].save()

        cmd = tool.cmdline(context)
        logger.debug(f"Running {cwd}")
        logger.trace(cmd)

        stats = run(
            cmd,
            cwd=cwd,
            stdout=out_file,
            stderr=err_file,
            cpu_time_limit=limits["time"],
            wall_time_limit=limits["time"] + 15,
            memory_limit=limits["memory"],
            freq=15,
        )

        logger.debug(f"Finished {cwd}")
        logger.debug(stats)

        context["run"].status = Run.DONE
        context["run"].return_code = stats["return_code"]

        if stats["error"] == TimeoutError:
            context["run"].verdict = Run.TIMEOUT
        elif stats["error"] == MemoryError:
            context["run"].verdict = Run.MEMOUT
        elif stats["error"] or stats["return_code"] != 0:
            context["run"].verdict = Run.RUNTIME_ERR
        else:
            context["run"].verdict = Run.SUCCESS

        context["run"].save()

        RunStatistic.create(
            run=context["run"], key=RunStatistic.WALL_TIME, value=stats["wall_time"]
        )
        RunStatistic.create(
            run=context["run"], key=RunStatistic.CPU_TIME, value=stats["cpu_time"]
        )
        RunStatistic.create(
            run=context["run"], key=RunStatistic.MEM_USAGE, value=stats["max_memory"]
        )

        tool.post_run(context)
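Every executor above records its measurements as RunStatistic rows tied to a Run, so results can be read back with ordinary peewee queries once a benchmark has finished. A minimal sketch, assuming a database file produced by one of the runners (the path below is illustrative):

from playhouse.apsw_ext import APSWDatabase

from reprobench.core.db import db, Run, RunStatistic

# Path of the database created by a runner; adjust to your benchmark title.
db.initialize(APSWDatabase("output/Example benchmark.benchmark.db"))

# CPU time of every finished run, together with its working directory.
query = (
    RunStatistic.select(RunStatistic, Run)
    .join(Run)
    .where((Run.status == Run.DONE) & (RunStatistic.key == RunStatistic.CPU_TIME))
)
for stat in query:
    print(stat.run.directory, stat.value)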
==== reprobench/executors/pynisher.py ====

import subprocess
from pathlib import Path

import pynisher
from loguru import logger

from reprobench.core.bases import Step
from reprobench.core.db import db, Run, RunStatistic


class PynisherExecutor(Step):
    def run(self, context):
        tool = context["tool"]
        limits = context["limits"]
        tool.pre_run(context)
        cwd = context["run"].directory
        out_file = (Path(cwd) / "run.out").open("wb")
        err_file = (Path(cwd) / "run.err").open("wb")

        context["run"].status = Run.RUNNING
        context["run"].save()

        def run_tool():
            subprocess.run(
                tool.cmdline(context), cwd=cwd, stdout=out_file, stderr=err_file
            )

        logger.debug(f"Running {cwd}")
        fun = pynisher.enforce_limits(
            cpu_time_in_s=limits["time"], mem_in_mb=limits["memory"]
        )(run_tool)
        fun()
        logger.debug(f"Finished {cwd}")

        context["run"].status = Run.DONE
        context["run"].verdict = Run.SUCCESS
        context["run"].save()

        RunStatistic.create(
            run=context["run"], key=RunStatistic.WALL_TIME, value=fun.wall_clock_time
        )
        RunStatistic.create(
            run=context["run"],
            key=RunStatistic.CPU_TIME,
            value=fun.resources_function[0] + fun.resources_function[1],  # utime + stime
        )
        RunStatistic.create(
            run=context["run"],
            key=RunStatistic.MEM_USAGE,
            value=fun.resources_function[2],  # maxrss
        )

        tool.post_run(context)


==== reprobench/executors/runsolver.py ====

import subprocess
from pathlib import Path

from reprobench.core.bases import Step
from reprobench.core.db import Run, RunStatistic
from reprobench.utils import find_executable


class RunsolverExecutor(Step):
    def __init__(self):
        self.executable = find_executable("runsolver")

    def run(self, context):
        tool = context["tool"]
        limits = context["limits"]
        tool.pre_run(context)
        cwd = context["run"].directory
        out_file = (Path(cwd) / "run.out").open("wb")
        err_file = (Path(cwd) / "run.err").open("wb")

        context["run"].status = Run.RUNNING
        context["run"].save()

        process = subprocess.run(
            [
                self.executable,
                "-w",
                "run.watcher",
                "-v",
                "run.stat",
                "--cores",
                limits["cores"],
                "-C",
                str(limits["time"]),
                "--vsize-limit",
                str(limits["memory"]),
                # "-O", "0,{}".format(limits["output"]),
                "--",
            ]
            + tool.cmdline(context),
            cwd=cwd,
            stdout=out_file,
            stderr=err_file,
        )

        context["run"].status = Run.DONE
        context["run"].verdict = Run.SUCCESS
        context["run"].save()

        tool.post_run(context)

        out_file.close()
        err_file.close()

        stat_file = Path(cwd) / "run.stat"
        stat_map = {
            "WCTIME": RunStatistic.WALL_TIME,
            "CPUTIME": RunStatistic.CPU_TIME,
            "MAXVM": RunStatistic.MEM_USAGE,
        }

        with stat_file.open() as f:
            for line in f:
                if line.startswith("#"):
                    continue
                # strip the trailing newline so the TIMEOUT/MEMOUT comparisons work
                key, value = line.strip().split("=")
                if key in stat_map:
                    RunStatistic.create(
                        run=context["run"], key=stat_map[key], value=value
                    )
                elif key == "TIMEOUT" and value == "true":
                    context["run"].verdict = Run.TIMEOUT
                    context["run"].save()
                elif key == "MEMOUT" and value == "true":
                    context["run"].verdict = Run.MEMOUT
                    context["run"].save()


==== reprobench/runners/__init__.py ====

from .local import LocalRunner
from .slurm import SlurmRunner


==== reprobench/runners/local.py ====

import os
import signal
import itertools
import time
import atexit
from multiprocessing.pool import Pool
from pathlib import Path

from tqdm import tqdm
from loguru import logger
from playhouse.apsw_ext import APSWDatabase

from reprobench.core.bases import Runner
from reprobench.core.db import db, db_bootstrap, Run, Tool, ParameterCategory, Task
from reprobench.utils import import_class


def execute_run(args):
    run_id, config, db_path = args

    # attach the worker process to the database before touching any model
    db.initialize(APSWDatabase(str(db_path)))

    run = Run.get_by_id(run_id)
    ToolClass = import_class(run.tool.module)
    tool_instance = ToolClass()

    context = config.copy()
    context["tool"] = tool_instance
    context["run"] = run
    logger.info(f"Processing task: {run.directory}")

    @atexit.register
    def exit():
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        os.killpg(os.getpgid(0), signal.SIGTERM)
        time.sleep(3)
        os.killpg(os.getpgid(0), signal.SIGKILL)

    for runstep in config["steps"]["run"]:
        Step = import_class(runstep["step"])
        step = Step()
        step.run(context)


class LocalRunner(Runner):
    def __init__(self, config, output_dir="./output", resume=False):
        self.config = config
        self.output_dir = output_dir
        self.resume = resume
        self.queue = []

    def setup(self):
        # signal.signal(signal.SIGTERM, self.exit)
        # signal.signal(signal.SIGINT, self.exit)
        # signal.signal(signal.SIGHUP, signal.SIG_IGN)
        atexit.register(self.exit)

        self.db_path = Path(self.output_dir) / f"{self.config['title']}.benchmark.db"
        db_created = Path(self.db_path).is_file()

        if db_created and not self.resume:
            logger.error(
                "It seems that previous runs exist in the output directory. "
                "Please use --resume to resume them."
            )
            exit(1)

        Path(self.output_dir).mkdir(parents=True, exist_ok=True)
        logger.debug(f"Creating database: {self.db_path}")
        self.database = APSWDatabase(str(self.db_path))
        db.initialize(self.database)

        if not db_created:
            logger.info("Bootstrapping db...")
            db_bootstrap(self.config)
            logger.info("Initializing runs...")
            self.init_runs()

    def create_working_directory(
        self, tool_name, parameter_category, task_category, filename
    ):
        path = (
            Path(self.output_dir)
            / tool_name
            / parameter_category
            / task_category
            / filename
        )
        path.mkdir(parents=True, exist_ok=True)
        return path

    def exit(self):
        if self.num_in_queue > 0:
            self.pool.terminate()
            self.pool.join()

    def populate_unfinished_runs(self):
        query = Run.select(Run.id).where(Run.status < Run.DONE)
        self.queue = [(run.id, self.config, self.db_path) for run in query]

    def init_runs(self):
        for tool_name, tool_module in self.config["tools"].items():
            for (parameter_category_name, (task_category, task)) in itertools.product(
                self.config["parameters"], self.config["tasks"].items()
            ):
                # only folder task type for now
                assert task["type"] == "folder"
                files = Path().glob(task["path"])
                for file in files:
                    context = self.config.copy()
                    directory = self.create_working_directory(
                        tool_name, parameter_category_name, task_category, file.name
                    )
                    tool = Tool.get(Tool.module == tool_module)
                    parameter_category = ParameterCategory.get(
                        ParameterCategory.title == parameter_category_name
                    )
                    task = Task.get(Task.path == str(file))

                    run = Run.create(
                        tool=tool,
                        task=task,
                        parameter_category=parameter_category,
                        directory=directory,
                        status=Run.SUBMITTED,
                    )

                    self.queue.append((run.id, self.config, self.db_path))

    def run(self):
        self.setup()

        if self.resume:
            logger.info("Resuming unfinished runs...")
            self.populate_unfinished_runs()

        self.num_in_queue = len(self.queue)
        if self.num_in_queue == 0:
            logger.success("No tasks remaining to run")
            exit(0)

        logger.debug("Running setup on all tools...")
        tools = []
        for tool_module in self.config["tools"].values():
            ToolClass = import_class(tool_module)
            tool_instance = ToolClass()
            tool_instance.setup()
            tools.append(tool_instance)

        logger.debug("Executing runs...")
        self.pool = Pool()
        it = self.pool.imap_unordered(execute_run, self.queue)
        for result in tqdm(it, total=self.num_in_queue):
            self.num_in_queue -= 1
        self.pool.close()
        self.pool.join()

        logger.debug("Running teardown on all tools...")
        for tool in tools:
            tool.teardown()

        # self.database.stop()
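The local runner can also be driven programmatically instead of through the click CLI; this mirrors what the "reprobench run local" command in console/main.py does (the configuration file name below is illustrative):

import strictyaml

from reprobench.core.schema import schema
from reprobench.runners import LocalRunner

# Load and validate the benchmark configuration, then hand it to the runner.
with open("benchmark.yml") as f:  # illustrative path
    config = strictyaml.load(f.read(), schema=schema).data

runner = LocalRunner(config, output_dir="./output", resume=False)
runner.run()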
==== reprobench/runners/slurm/__init__.py ====

import os
import itertools
import atexit
import subprocess
from string import Template
from pathlib import Path

from loguru import logger
from playhouse.apsw_ext import APSWDatabase

from reprobench.core.bases import Runner
from reprobench.core.db import db, db_bootstrap, Run, Tool, ParameterCategory, Task
from reprobench.utils import import_class

DIR = os.path.dirname(os.path.realpath(__file__))


class SlurmRunner(Runner):
    def __init__(
        self,
        config,
        config_path,
        python_path,
        output_dir="./output",
        resume=False,
        teardown=False,
    ):
        self.config = config
        self.config_path = config_path
        self.output_dir = output_dir
        self.python_path = python_path
        self.resume = resume
        self.teardown = teardown
        self.queue = []

    def setup(self):
        atexit.register(self.exit)

        self.db_path = Path(self.output_dir) / f"{self.config['title']}.benchmark.db"
        db_created = Path(self.db_path).is_file()

        if db_created and not self.resume:
            logger.error(
                "It seems that previous runs exist in the output directory. "
                "Please use --resume to resume them."
            )
            exit(1)

        Path(self.output_dir).mkdir(parents=True, exist_ok=True)
        logger.debug(f"Creating database: {self.db_path}")
        self.database = APSWDatabase(str(self.db_path))
        db.initialize(self.database)

        if not db_created:
            logger.info("Bootstrapping db...")
            db_bootstrap(self.config)
            logger.info("Initializing runs...")
            self.init_runs()

    def create_working_directory(
        self, tool_name, parameter_category, task_category, filename
    ):
        path = (
            Path(self.output_dir)
            / tool_name
            / parameter_category
            / task_category
            / filename
        )
        path.mkdir(parents=True, exist_ok=True)
        return path

    def exit(self):
        pass

    def init_runs(self):
        for tool_name, tool_module in self.config["tools"].items():
            for (parameter_category_name, (task_category, task)) in itertools.product(
                self.config["parameters"], self.config["tasks"].items()
            ):
                # only folder task type for now
                assert task["type"] == "folder"
                files = Path().glob(task["path"])
                for file in files:
                    context = self.config.copy()
                    directory = self.create_working_directory(
                        tool_name, parameter_category_name, task_category, file.name
                    )
                    tool = Tool.get(Tool.module == tool_module)
                    parameter_category = ParameterCategory.get(
                        ParameterCategory.title == parameter_category_name
                    )
                    task = Task.get(Task.path == str(file))

                    run = Run.create(
                        tool=tool,
                        task=task,
                        parameter_category=parameter_category,
                        directory=directory,
                    )

                    self.queue.append(run.id)

    def run(self):
        if not self.teardown:
            self.setup()

            logger.debug("Running setup on all tools...")
            tools = []
            for tool_module in self.config["tools"].values():
                ToolClass = import_class(tool_module)
                tool_instance = ToolClass()
                tool_instance.setup()
                tools.append(tool_instance)

            logger.debug("Generating template")
            with open(Path(DIR) / "./slurm.job.tpl") as tpl:
                template = Template(tpl.read())
                job_str = template.safe_substitute(
                    python_path=self.python_path,
                    config_path=self.config_path,
                    db_path=self.db_path,
                )

            slurm_job_path = Path(self.output_dir) / "slurm.job"
            with open(slurm_job_path, "w") as job:
                job.write(job_str)

            logger.info("Submitting job array to SLURM...")
            sbatch_cmd = [
                "sbatch",
                "-a",
                f"1-{len(self.queue)}",
                str(slurm_job_path.resolve()),
            ]
            logger.debug(" ".join(sbatch_cmd))
            subprocess.run(sbatch_cmd)
        else:
            logger.debug("Running teardown on all tools...")
            for tool_module in self.config["tools"].values():
                ToolClass = import_class(tool_module)
                tool_instance = ToolClass()
                tool_instance.teardown()


==== reprobench/runners/slurm/slurm.job.tpl ====

#!/bin/bash
#SBATCH --export=all

srun -- $python_path -m reprobench.runners.slurm.slurm_worker -c $config_path -d $db_path $SLURM_ARRAY_TASK_ID


==== reprobench/runners/slurm/slurm_worker.py ====

import os
import signal
import atexit
import time

import click
import strictyaml
from loguru import logger
from playhouse.apsw_ext import APSWDatabase

from reprobench.core.schema import schema
from reprobench.core.db import db, Run
from reprobench.utils import import_class


@click.command()
@click.option("-c", "--config", required=True, type=click.File())
@click.option(
    "-d",
    "--database",
    required=True,
    type=click.Path(dir_okay=False, resolve_path=True),
)
@click.argument("run_id", type=int)
def run(config, database, run_id):
    config = config.read()
    config = strictyaml.load(config, schema=schema).data

    db.initialize(APSWDatabase(str(database)))

    run = Run.get_by_id(run_id)
    ToolClass = import_class(run.tool.module)
    tool_instance = ToolClass()

    context = config.copy()
    context["tool"] = tool_instance
    context["run"] = run
    logger.info(f"Processing task: {run.directory}")

    @atexit.register
    def exit():
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        os.killpg(os.getpgid(0), signal.SIGTERM)
        time.sleep(3)
        os.killpg(os.getpgid(0), signal.SIGKILL)

    for runstep in config["steps"]["run"]:
        Step = import_class(runstep["step"])
        step = Step()
        step.run(context)


if __name__ == "__main__":
    run()


==== reprobench/tools/executable.py ====

import subprocess
import tempfile
import shutil
from pathlib import Path
from uuid import uuid4

from reprobench.core.bases import Tool
from reprobench.utils import find_executable, silent_run


class ExecutableTool(Tool):
    name = "Basic Executable Tool"
    path = None


==== reprobench/tools/reprozip.py ====

import subprocess
import tempfile
import shutil
from pathlib import Path
from uuid import uuid4

from reprobench.core.bases import Tool
from reprobench.utils import find_executable, silent_run


class ReprozipTool(Tool):
    name = "Reprozip-based Tool"
    path = None
    runner = "directory"
    REQUIRED_PATHS = [
        str((Path(find_executable("reprounzip")) / ".." / "..").resolve()),
        tempfile.gettempdir(),
    ]

    def __init__(self):
        self.reprounzip = find_executable("reprounzip")
        self.dir = f"{tempfile.gettempdir()}/reprounzip-{uuid4()}"
        self.base_command = [self.reprounzip, self.runner]

    def setup(self):
        silent_run(self.base_command + ["setup", self.path, self.dir])

    def cmdline(self, context):
        return self.base_command + ["run", self.dir]

    def teardown(self):
        silent_run(self.base_command + ["destroy", self.dir])


==== reprobench/utils.py ====

import logging
import importlib
import subprocess
from shutil import which

from reprobench.core.exceptions import ExecutableNotFoundError

log = logging.getLogger(__name__)


def find_executable(executable):
    path = which(executable)
    if path is None:
        raise ExecutableNotFoundError
    return path


def silent_run(command):
    log.debug(f"Running: {command}")
    return subprocess.run(
        command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
    )


def import_class(path):
    module_path, tail = ".".join(path.split(".")[:-1]), path.split(".")[-1]
    module = importlib.import_module(module_path)
    return getattr(module, tail)


==== reprobench-0.1.0.dist-info/LICENSE ====

MIT License

Copyright (c) 2019 Rakha Kanz Kautsar

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
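Finally, to illustrate how a concrete tool plugs into the pipeline, here is a hypothetical subclass of the Tool base class; the solver name and the examplesat binary are invented for the example. Its dotted module path would be listed under the tools mapping in the configuration, and the executors invoke cmdline() with the run's working directory as the current directory.

from reprobench.core.bases import Tool
from reprobench.utils import find_executable


class ExampleSatSolver(Tool):
    """Hypothetical wrapper around an imaginary examplesat binary."""

    name = "Example SAT Solver"

    def __init__(self):
        # Resolved per instance, like ReprozipTool does, because the runner
        # and the worker processes each construct their own tool object.
        self.executable = find_executable("examplesat")

    def version(self):
        return "1.2.3"

    def cmdline(self, context):
        # The executor runs this command inside the run's working directory;
        # the task path comes from the Run row attached to the context.
        return [self.executable, context["run"].task.path]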