--- openbm/.pytest_cache/.gitignore ---
# created by pytest automatically, do not change
*

--- openbm/.pytest_cache/README.md ---
# pytest cache directory #

This directory contains data from the pytest's cache plugin,
which provides the `--lf` and `--ff` options, as well as the `cache` fixture.

**Do not** commit this to version control.

See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information.

--- openbm/.pytest_cache/v/cache/nodeids ---
[]

--- openbm/.pytest_cache/v/cache/stepwise ---
[]

--- openbm/bin/activate ---
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly

deactivate () {
    unset pydoc

    # reset old environment variables
    if [ -n "$_OLD_VIRTUAL_PATH" ] ; then
        PATH="$_OLD_VIRTUAL_PATH"
        export PATH
        unset _OLD_VIRTUAL_PATH
    fi
    if [ -n "$_OLD_VIRTUAL_PYTHONHOME" ] ; then
        PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME"
        export PYTHONHOME
        unset _OLD_VIRTUAL_PYTHONHOME
    fi

    # This should detect bash and zsh, which have a hash command that must
    # be called to get it to forget past commands. Without forgetting
    # past commands the $PATH changes we made may not be respected
    if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
        hash -r 2>/dev/null
    fi

    if [ -n "$_OLD_VIRTUAL_PS1" ] ; then
        PS1="$_OLD_VIRTUAL_PS1"
        export PS1
        unset _OLD_VIRTUAL_PS1
    fi

    unset VIRTUAL_ENV
    if [ ! "$1" = "nondestructive" ] ; then
        # Self destruct!
        unset -f deactivate
    fi
}

# unset irrelevant variables
deactivate nondestructive

VIRTUAL_ENV="/home/ocurero/projects/openbatchman/src"
export VIRTUAL_ENV

_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH

# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "$PYTHONHOME" ] ; then
    _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"
    unset PYTHONHOME
fi

if [ -z "$VIRTUAL_ENV_DISABLE_PROMPT" ] ; then
    _OLD_VIRTUAL_PS1="$PS1"
    if [ "x" != x ] ; then
        PS1="$PS1"
    else
        if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
            # special case for Aspen magic directories
            # see http://www.zetadev.com/software/aspen/
            PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1"
        else
            PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1"
        fi
    fi
    export PS1
fi

alias pydoc="python -m pydoc"

# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
    hash -r 2>/dev/null
fi
--- openbm/bin/activate.csh ---
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi.

alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc'

# Unset irrelevant variables.
deactivate nondestructive

setenv VIRTUAL_ENV "/home/ocurero/projects/openbatchman/src"

set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/bin:$PATH"

if ("" != "") then
    set env_name = ""
else
    if (`basename "$VIRTUAL_ENV"` == "__") then
        # special case for Aspen magic directories
        # see http://www.zetadev.com/software/aspen/
        set env_name = `basename \`dirname "$VIRTUAL_ENV"\``
    else
        set env_name = `basename "$VIRTUAL_ENV"`
    endif
endif

# Could be in a non-interactive environment,
# in which case, $prompt is undefined and we wouldn't
# care about the prompt anyway.
if ( $?prompt ) then
    set _OLD_VIRTUAL_PROMPT="$prompt"
    set prompt = "[$env_name] $prompt"
endif

unset env_name

alias pydoc python -m pydoc

rehash

--- openbm/bin/activate.fish ---
# This file must be used with ". bin/activate.fish" *from fish* (http://fishshell.org)
# you cannot run it directly

function deactivate -d "Exit virtualenv and return to normal shell environment"
    # reset old environment variables
    if test -n "$_OLD_VIRTUAL_PATH"
        set -gx PATH $_OLD_VIRTUAL_PATH
        set -e _OLD_VIRTUAL_PATH
    end
    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
        set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
        set -e _OLD_VIRTUAL_PYTHONHOME
    end

    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
        functions -e fish_prompt
        set -e _OLD_FISH_PROMPT_OVERRIDE
        . ( begin
                printf "function fish_prompt\n\t#"
                functions _old_fish_prompt
            end | psub )
        functions -e _old_fish_prompt
    end

    set -e VIRTUAL_ENV
    if test "$argv[1]" != "nondestructive"
        # Self destruct!
        functions -e deactivate
    end
end

# unset irrelevant variables
deactivate nondestructive

set -gx VIRTUAL_ENV "/home/ocurero/projects/openbatchman/src"

set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/bin" $PATH

# unset PYTHONHOME if set
if set -q PYTHONHOME
    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
    set -e PYTHONHOME
end

if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
    # fish uses a function instead of an env var to generate the prompt.
    # save the current fish_prompt function as the function _old_fish_prompt
    . ( begin
            printf "function _old_fish_prompt\n\t#"
            functions fish_prompt
        end | psub )

    # with the original prompt function renamed, we can override with our own.
    function fish_prompt
        # Prompt override?
        if test -n ""
            printf "%s%s%s" "" (set_color normal) (_old_fish_prompt)
            return
        end
        # ...Otherwise, prepend env
        set -l _checkbase (basename "$VIRTUAL_ENV")
        if test $_checkbase = "__"
            # special case for Aspen magic directories
            # see http://www.zetadev.com/software/aspen/
            printf "%s[%s]%s %s" (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal) (_old_fish_prompt)
        else
            printf "%s(%s)%s%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal) (_old_fish_prompt)
        end
    end

    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
end
--- openbm/bin/activate_this.py ---
"""By using execfile(this_file, dict(__file__=this_file)) you will
activate this virtualenv environment.

This can be used when you must use an existing Python interpreter, not
the virtualenv bin/python
"""

try:
    __file__
except NameError:
    raise AssertionError(
        "You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
import sys
import os

old_os_path = os.environ['PATH']
os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if sys.platform == 'win32':
    site_packages = os.path.join(base, 'Lib', 'site-packages')
else:
    site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
prev_sys_path = list(sys.path)
import site
site.addsitedir(site_packages)
sys.real_prefix = sys.prefix
sys.prefix = base
# Move the added items to the front of the path:
new_sys_path = []
for item in list(sys.path):
    if item not in prev_sys_path:
        new_sys_path.append(item)
        sys.path.remove(item)
sys.path[:0] = new_sys_path
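A usage sketch for activate_this.py (not part of the archive; the path is this venv's own VIRTUAL_ENV):

    activate_this = '/home/ocurero/projects/openbatchman/src/bin/activate_this.py'
    # Python 2, exactly as the docstring above prescribes:
    #   execfile(activate_this, dict(__file__=activate_this))
    # Python 3 equivalent (execfile was removed):
    with open(activate_this) as f:
        exec(f.read(), dict(__file__=activate_this))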
--- openbm/bin/circuits.bench ---
#!/home/prologic/.virtualenvs/circuits/bin/python
# -*- coding: utf-8 -*-

"""(Tool) Bench Marking Tool

This tool does some simple benchmarking of the circuits library.
"""

import sys
import math
import optparse
from time import sleep

if sys.platform == "win32":
    from time import clock as time
else:
    from time import time  # NOQA

try:
    import hotshot
    import hotshot.stats
except ImportError:
    hotshot = None  # NOQA

try:
    import psyco
except ImportError:
    psyco = None  # NOQA

from circuits import __version__ as systemVersion
from circuits import handler, Event, Component, Manager, Debugger

USAGE = "%prog [options]"
VERSION = "%prog v" + systemVersion


def duration(seconds):
    days = int(seconds / 60 / 60 / 24)
    seconds = (seconds) % (60 * 60 * 24)
    hours = int((seconds / 60 / 60))
    seconds = (seconds) % (60 * 60)
    mins = int((seconds / 60))
    seconds = int((seconds) % (60))
    return (days, hours, mins, seconds)


def parse_options():
    parser = optparse.OptionParser(usage=USAGE, version=VERSION)
    parser.add_option(
        "-t", "--time",
        action="store", type="int", default=0, dest="time",
        help="Stop after specified elapsed seconds"
    )
    parser.add_option(
        "-e", "--events",
        action="store", type="int", default=0, dest="events",
        help="Stop after specified number of events"
    )
    parser.add_option(
        "-p", "--profile",
        action="store_true", default=False, dest="profile",
        help="Enable execution profiling support"
    )
    parser.add_option(
        "-d", "--debug",
        action="store_true", default=False, dest="debug",
        help="Enable debug mode"
    )
    parser.add_option(
        "-m", "--mode",
        action="store", type="choice", default="speed", dest="mode",
        choices=["sync", "speed", "latency"],
        help="Operation mode"
    )
    parser.add_option(
        "-s", "--speed",
        action="store_true", default=False, dest="speed",
        help="Enable psyco (circuits on speed!)"
    )
    parser.add_option(
        "-q", "--quiet",
        action="store_false", default=True, dest="verbose",
        help="Suppress output"
    )
    opts, args = parser.parse_args()
    return opts, args


class stop(Event):
    """stop Event"""


class term(Event):
    """term Event"""


class hello(Event):
    """hello Event"""


class received(Event):
    """received Event"""


class Base(Component):

    def __init__(self, opts, *args, **kwargs):
        super(Base, self).__init__(*args, **kwargs)
        self.opts = opts


class SpeedTest(Base):

    def received(self, message=""):
        self.fire(hello("hello"))

    def hello(self, message):
        self.fire(received(message))


class LatencyTest(Base):

    t = None

    def received(self, message=""):
        print("Latency: %0.9f us" % ((time() - self.t) * 1e6))
        sleep(1)
        self.fire(hello("hello"))

    def hello(self, message=""):
        self.t = time()
        self.fire(received(message))


class State(Base):

    done = False

    def stop(self):
        self.fire(term())

    def term(self):
        self.done = True


class Monitor(Base):

    sTime = sys.maxsize
    events = 0
    state = 0

    @handler(filter=True)
    def event(self, *args, **kwargs):
        self.events += 1
        if self.events > self.opts.events:
            self.stop()


def main():
    opts, args = parse_options()

    if opts.speed and psyco:
        psyco.full()

    manager = Manager()

    monitor = Monitor(opts)
    manager += monitor

    state = State(opts)
    manager += state

    if opts.debug:
        manager += Debugger()

    if opts.mode.lower() == "speed":
        if opts.verbose:
            print("Setting up Speed Test...")
        manager += SpeedTest(opts)
        monitor.sTime = time()
    elif opts.mode.lower() == "latency":
        if opts.verbose:
            print("Setting up Latency Test...")
        manager += LatencyTest(opts)
        monitor.sTime = time()

    if opts.verbose:
        print("Setting up Sender...")
        print("Setting up Receiver...")

    monitor.sTime = time()

    if opts.profile:
        if hotshot:
            profiler = hotshot.Profile("bench.prof")
            profiler.start()

    manager.fire(hello("hello"))

    while not state.done:
        try:
            manager.tick()
            if opts.events > 0 and monitor.events > opts.events:
                manager.fire(stop())
            if opts.time > 0 and (time() - monitor.sTime) > opts.time:
                manager.fire(stop())
        except KeyboardInterrupt:
            manager.fire(stop())

    if opts.verbose:
        print()

    eTime = time()
    tTime = eTime - monitor.sTime

    events = monitor.events
    speed = int(math.ceil(float(monitor.events) / tTime))

    print("Total Events: %d (%d/s after %0.2fs)" % (events, speed, tTime))

    if opts.profile and hotshot:
        profiler.stop()
        profiler.close()
        stats = hotshot.stats.load("bench.prof")
        stats.strip_dirs()
        stats.sort_stats("time", "calls")
        stats.print_stats(20)


if __name__ == "__main__":
    main()
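A minimal sketch of the event ping-pong that SpeedTest exercises (not part of the archive; assumes circuits is installed, and drives the loop with Manager.tick() exactly as the benchmark's main() does):

    from circuits import Component, Event, Manager

    class hello(Event):
        """hello Event"""

    class received(Event):
        """received Event"""

    class PingPong(Component):
        count = 0

        def hello(self, message):
            self.count += 1
            self.fire(received(message))   # every hello triggers a received

        def received(self, message=""):
            self.fire(hello("hello"))      # ...which triggers another hello

    manager = Manager()
    pp = PingPong()
    manager += pp
    manager.fire(hello("hello"))
    for _ in range(1000):                  # drive the event loop by hand
        manager.tick()
    print("hello handled %d times" % pp.count)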
--- openbm/bin/circuits.web ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# -*- coding: utf-8 -*-
import re
import sys

from circuits.web.main import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

--- openbm/bin/easy_install ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==18.0.1','console_scripts','easy_install'
__requires__ = 'setuptools==18.0.1'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('setuptools==18.0.1', 'console_scripts', 'easy_install')()
    )

--- openbm/bin/easy_install-2.7 ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==18.0.1','console_scripts','easy_install-2.7'
__requires__ = 'setuptools==18.0.1'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('setuptools==18.0.1', 'console_scripts', 'easy_install-2.7')()
    )

--- openbm/bin/htpasswd ---
#!/home/prologic/.virtualenvs/circuits/bin/python

"""Pure Python replacement for Apache's htpasswd

Borrowed from: https://gist.github.com/eculver/1420227

Modifications by James Mills, prologic at shortcircuit dot net dot au
- Added support for MD5 and SHA1 hashing.
"""

# Original author: Eli Carter

import os
import sys
import random
from hashlib import md5, sha1
from optparse import OptionParser

try:
    from crypt import crypt
except ImportError:
    try:
        from fcrypt import crypt
    except ImportError:
        crypt = None


def salt():
    """Returns a string of 2 random letters"""
    letters = 'abcdefghijklmnopqrstuvwxyz' \
              'ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
              '0123456789/.'
    return random.choice(letters) + random.choice(letters)


class HtpasswdFile:
    """A class for manipulating htpasswd files."""

    def __init__(self, filename, create=False, encryption=None):
        self.filename = filename
        if encryption is None:
            self.encryption = lambda p: md5(p).hexdigest()
        else:
            self.encryption = encryption
        self.entries = []
        if not create:
            if os.path.exists(self.filename):
                self.load()
            else:
                raise Exception("%s does not exist" % self.filename)

    def load(self):
        """Read the htpasswd file into memory."""
        lines = open(self.filename, 'r').readlines()
        self.entries = []
        for line in lines:
            username, pwhash = line.split(':')
            entry = [username, pwhash.rstrip()]
            self.entries.append(entry)

    def save(self):
        """Write the htpasswd file to disk"""
        open(self.filename, 'w').writelines(
            ["%s:%s\n" % (entry[0], entry[1]) for entry in self.entries])

    def update(self, username, password):
        """Replace the entry for the given user, or add it if new."""
        pwhash = self.encryption(password)
        matching_entries = [entry for entry in self.entries
                            if entry[0] == username]
        if matching_entries:
            matching_entries[0][1] = pwhash
        else:
            self.entries.append([username, pwhash])

    def delete(self, username):
        """Remove the entry for the given user."""
        self.entries = [entry for entry in self.entries
                        if entry[0] != username]


def main():
    """%prog [-c] -b filename username password
    Create or update an htpasswd file"""
    # For now, we only care about the use cases that affect tests/functional.py
    parser = OptionParser(usage=main.__doc__)
    parser.add_option('-b', action='store_true', dest='batch', default=False,
                      help='Batch mode; password is passed on the command line IN THE CLEAR.')
    parser.add_option('-c', action='store_true', dest='create', default=False,
                      help='Create a new htpasswd file, overwriting any existing file.')
    parser.add_option('-D', action='store_true', dest='delete_user', default=False,
                      help='Remove the given user from the password file.')
    if crypt is not None:
        parser.add_option('-d', action='store_true', dest='crypt', default=False,
                          help='Use crypt() encryption for passwords.')
    parser.add_option('-m', action='store_true', dest='md5', default=False,
                      help='Use MD5 encryption for passwords. (Default)')
    parser.add_option('-s', action='store_true', dest='sha', default=False,
                      help='Use SHA encryption for passwords.')

    options, args = parser.parse_args()

    def syntax_error(msg):
        """Utility function for displaying fatal error messages with usage
        help.
        """
        sys.stderr.write("Syntax error: " + msg)
        sys.stderr.write(parser.get_usage())
        sys.exit(1)

    if not options.batch:
        syntax_error("Only batch mode is supported\n")

    # Non-option arguments
    if len(args) < 2:
        syntax_error("Insufficient number of arguments.\n")
    filename, username = args[:2]
    if options.delete_user:
        if len(args) != 2:
            syntax_error("Incorrect number of arguments.\n")
        password = None
    else:
        if len(args) != 3:
            syntax_error("Incorrect number of arguments.\n")
        password = args[2]

    # The -d option is only defined when crypt() is available, so guard
    # the attribute access instead of assuming it exists.
    if getattr(options, 'crypt', False):
        encryption = lambda p: crypt(p, salt())
    elif options.md5:
        encryption = lambda p: md5(p).hexdigest()
    elif options.sha:
        encryption = lambda p: sha1(p).hexdigest()
    else:
        encryption = lambda p: md5(p).hexdigest()

    passwdfile = HtpasswdFile(filename, create=options.create,
                              encryption=encryption)

    if options.delete_user:
        passwdfile.delete(username)
    else:
        passwdfile.update(username, password)

    passwdfile.save()


if __name__ == '__main__':
    main()
--- openbm/bin/pcreate ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pyramid==1.5.7','console_scripts','pcreate'
__requires__ = 'pyramid==1.5.7'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('pyramid==1.5.7', 'console_scripts', 'pcreate')()
    )

--- openbm/bin/pdistreport ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pyramid==1.5.7','console_scripts','pdistreport'
__requires__ = 'pyramid==1.5.7'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('pyramid==1.5.7', 'console_scripts', 'pdistreport')()
    )

--- openbm/bin/pip ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==7.1.0','console_scripts','pip'
__requires__ = 'pip==7.1.0'
import sys
from pkg_resources import load_entry_point

sys.exit(
    load_entry_point('pip==7.1.0', 'console_scripts', 'pip')()
)

--- openbm/bin/pip2 ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==7.1.0','console_scripts','pip2'
__requires__ = 'pip==7.1.0'
import sys
from pkg_resources import load_entry_point

sys.exit(
    load_entry_point('pip==7.1.0', 'console_scripts', 'pip2')()
)

--- openbm/bin/pip2.7 ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==7.1.0','console_scripts','pip2.7'
__requires__ = 'pip==7.1.0'
import sys
from pkg_resources import load_entry_point

sys.exit(
    load_entry_point('pip==7.1.0', 'console_scripts', 'pip2.7')()
)

--- openbm/bin/prequest ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pyramid==1.5.7','console_scripts','prequest'
__requires__ = 'pyramid==1.5.7'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('pyramid==1.5.7', 'console_scripts', 'prequest')()
    )

--- openbm/bin/proutes ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pyramid==1.5.7','console_scripts','proutes'
__requires__ = 'pyramid==1.5.7'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('pyramid==1.5.7', 'console_scripts', 'proutes')()
    )

--- openbm/bin/pserve ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pyramid==1.5.7','console_scripts','pserve'
__requires__ = 'pyramid==1.5.7'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('pyramid==1.5.7', 'console_scripts', 'pserve')()
    )

--- openbm/bin/pshell ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pyramid==1.5.7','console_scripts','pshell'
__requires__ = 'pyramid==1.5.7'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('pyramid==1.5.7', 'console_scripts', 'pshell')()
    )

--- openbm/bin/ptweens ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pyramid==1.5.7','console_scripts','ptweens'
__requires__ = 'pyramid==1.5.7'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('pyramid==1.5.7', 'console_scripts', 'ptweens')()
    )

--- openbm/bin/pviews ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pyramid==1.5.7','console_scripts','pviews'
__requires__ = 'pyramid==1.5.7'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('pyramid==1.5.7', 'console_scripts', 'pviews')()
    )
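All of the bin/ wrappers above were generated by setuptools from console_scripts entry points. A sketch of the declaration that produces such a script (the project and function names are hypothetical):

    # setup.py of a hypothetical 'exampletool' project
    from setuptools import setup

    setup(
        name='exampletool',
        version='0.1',
        py_modules=['exampletool'],
        entry_points={
            'console_scripts': [
                # installs bin/exampletool, which imports exampletool and calls main()
                'exampletool = exampletool:main',
            ],
        },
    )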
--- openbm/bin/python ---
--- openbm/bin/python2 ---
--- openbm/bin/python2.7 ---
[three identical binary entries: 32-bit ELF executables for /lib/ld-linux.so.2, thin launchers that link libpython2.7.so.1.0 and call Py_Main; each carries a .gnu_debuglink to python2.7.debug and a SUSE build note. Binary content omitted.]

--- openbm/bin/rpyc_classic.py ---
#!/home/ocurero/projects/openbatchman/src/bin/python
"""
classic rpyc server (threaded, forking or std) running a SlaveService
usage:
    rpyc_classic.py                         # default settings
    rpyc_classic.py -m forking -p 12345     # custom settings

    # ssl-authenticated server (keyfile and certfile are required)
    rpyc_classic.py --ssl-keyfile keyfile.pem --ssl-certfile certfile.pem --ssl-cafile cafile.pem
"""
import sys
import os
import rpyc
from plumbum import cli
from rpyc.utils.server import ThreadedServer, ForkingServer, OneShotServer
from rpyc.utils.classic import DEFAULT_SERVER_PORT, DEFAULT_SERVER_SSL_PORT
from rpyc.utils.registry import REGISTRY_PORT
from rpyc.utils.registry import UDPRegistryClient, TCPRegistryClient
from rpyc.utils.authenticators import SSLAuthenticator
from rpyc.lib import setup_logger
from rpyc.core import SlaveService


class ClassicServer(cli.Application):
    mode = cli.SwitchAttr(["-m", "--mode"],
        cli.Set("threaded", "forking", "stdio", "oneshot"), default="threaded",
        help="The serving mode (threaded, forking, or 'stdio' for inetd, etc.)")

    port = cli.SwitchAttr(["-p", "--port"], cli.Range(0, 65535), default=None,
        help="The TCP listener port (default = %s, default for SSL = %s)" %
            (DEFAULT_SERVER_PORT, DEFAULT_SERVER_SSL_PORT),
        group="Socket Options")
    host = cli.SwitchAttr(["--host"], str, default="",
        help="The host to bind to. The default is INADDR_ANY",
        group="Socket Options")
    ipv6 = cli.Flag(["--ipv6"], help="Enable IPv6", group="Socket Options")

    logfile = cli.SwitchAttr("--logfile", str, default=None,
        help="Specify the log file to use; the default is stderr",
        group="Logging")
    quiet = cli.Flag(["-q", "--quiet"],
        help="Quiet mode (only errors will be logged)", group="Logging")

    ssl_keyfile = cli.SwitchAttr("--ssl-keyfile", cli.ExistingFile,
        help="The keyfile to use for SSL. Required for SSL",
        group="SSL", requires=["--ssl-certfile"])
    ssl_certfile = cli.SwitchAttr("--ssl-certfile", cli.ExistingFile,
        help="The certificate file to use for SSL. Required for SSL",
        group="SSL", requires=["--ssl-keyfile"])
    ssl_cafile = cli.SwitchAttr("--ssl-cafile", cli.ExistingFile,
        help="The certificate authority chain file to use for SSL. "
             "Optional; enables client-side authentication",
        group="SSL", requires=["--ssl-keyfile"])

    auto_register = cli.Flag("--register",
        help="Asks the server to attempt registering with a registry server. "
             "By default, the server will not attempt to register",
        group="Registry")
    registry_type = cli.SwitchAttr("--registry-type", cli.Set("UDP", "TCP"),
        default="UDP", help="Specify a UDP or TCP registry", group="Registry")
    registry_port = cli.SwitchAttr("--registry-port", cli.Range(0, 65535),
        default=REGISTRY_PORT, help="The registry's UDP/TCP port",
        group="Registry")
    registry_host = cli.SwitchAttr("--registry-host", str, default=None,
        help="The registry host machine. For UDP, the default is 255.255.255.255; "
             "for TCP, a value is required",
        group="Registry")

    def main(self):
        if self.registry_type == "UDP":
            if self.registry_host is None:
                self.registry_host = "255.255.255.255"
            self.registrar = UDPRegistryClient(ip=self.registry_host,
                                               port=self.registry_port)
        else:
            if self.registry_host is None:
                raise ValueError("With TCP registry, you must specify --registry-host")
            self.registrar = TCPRegistryClient(ip=self.registry_host,
                                               port=self.registry_port)

        if self.ssl_keyfile:
            self.authenticator = SSLAuthenticator(self.ssl_keyfile,
                self.ssl_certfile, self.ssl_cafile)
            default_port = DEFAULT_SERVER_SSL_PORT
        else:
            self.authenticator = None
            default_port = DEFAULT_SERVER_PORT
        if self.port is None:
            self.port = default_port

        setup_logger(self.quiet, self.logfile)

        if self.mode == "threaded":
            self._serve_mode(ThreadedServer)
        elif self.mode == "forking":
            self._serve_mode(ForkingServer)
        elif self.mode == "oneshot":
            self._serve_oneshot()
        elif self.mode == "stdio":
            self._serve_stdio()

    def _serve_mode(self, factory):
        t = factory(SlaveService, hostname=self.host, port=self.port,
            reuse_addr=True, ipv6=self.ipv6, authenticator=self.authenticator,
            registrar=self.registrar, auto_register=self.auto_register)
        t.start()

    def _serve_oneshot(self):
        t = OneShotServer(SlaveService, hostname=self.host, port=self.port,
            reuse_addr=True, ipv6=self.ipv6, authenticator=self.authenticator,
            registrar=self.registrar, auto_register=self.auto_register)
        sys.stdout.write("rpyc-oneshot\n")
        sys.stdout.write("%s\t%s\n" % (t.host, t.port))
        sys.stdout.flush()
        t.start()

    def _serve_stdio(self):
        origstdin = sys.stdin
        origstdout = sys.stdout
        sys.stdin = open(os.devnull, "r")
        sys.stdout = open(os.devnull, "w")
        sys.stderr = open(os.devnull, "w")
        conn = rpyc.classic.connect_pipes(origstdin, origstdout)
        try:
            try:
                conn.serve_all()
            except KeyboardInterrupt:
                print("User interrupt!")
        finally:
            conn.close()


if __name__ == "__main__":
    ClassicServer.run()

--- openbm/bin/rpyc_registry.py ---
#!/home/ocurero/projects/openbatchman/src/bin/python
"""
The registry server listens to broadcasts on UDP port 18812, answering to
discovery queries by clients and registering keepalives from all running
servers. In order for clients to use discovery, a registry service must
be running somewhere on their local network.
"""
from plumbum import cli
from rpyc.utils.registry import REGISTRY_PORT, DEFAULT_PRUNING_TIMEOUT
from rpyc.utils.registry import UDPRegistryServer, TCPRegistryServer
from rpyc.lib import setup_logger


class RegistryServer(cli.Application):
    mode = cli.SwitchAttr(["-m", "--mode"], cli.Set("UDP", "TCP"),
        default="UDP", help="Serving mode")
    ipv6 = cli.Flag(["-6", "--ipv6"], help="use ipv6 instead of ipv4")
    port = cli.SwitchAttr(["-p", "--port"], cli.Range(0, 65535),
        default=REGISTRY_PORT, help="The UDP/TCP listener port")
    logfile = cli.SwitchAttr(["--logfile"], str, default=None,
        help="The log file to use; the default is stderr")
    quiet = cli.SwitchAttr(["-q", "--quiet"],
        help="Quiet mode (only errors are logged)")
    pruning_timeout = cli.SwitchAttr(["-t", "--timeout"], int,
        default=DEFAULT_PRUNING_TIMEOUT,
        help="Set a custom pruning timeout (in seconds)")

    def main(self):
        if self.mode == "UDP":
            server = UDPRegistryServer(host='::' if self.ipv6 else '0.0.0.0',
                port=self.port, pruning_timeout=self.pruning_timeout)
        elif self.mode == "TCP":
            server = TCPRegistryServer(port=self.port,
                pruning_timeout=self.pruning_timeout)
        setup_logger(self.quiet, self.logfile)
        server.start()


if __name__ == "__main__":
    RegistryServer.run()
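A client-side sketch for the two rpyc scripts above (not part of the archive; assumes a classic server on localhost at the default port, and, for discovery, a registry plus a server started with --register on the local network):

    import rpyc
    from rpyc.utils.factory import discover

    # direct connection to a SlaveService started by rpyc_classic.py
    conn = rpyc.classic.connect("localhost")
    remote_os = conn.modules.os          # modules of the *server* process
    print(remote_os.getcwd())
    conn.close()

    # or locate servers through the registry instead of hard-coding addresses
    addrs = discover("SLAVE")            # e.g. [("192.168.1.10", 18812)]
    host, port = addrs[0]
    conn = rpyc.classic.connect(host, port)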
--- openbm/bin/rst2html.py ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing HTML.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates (X)HTML documents from standalone reStructuredText '
               'sources. ' + default_description)

publish_cmdline(writer_name='html', description=description)

--- openbm/bin/rst2latex.py ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline

description = ('Generates LaTeX documents from standalone reStructuredText '
               'sources. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout). See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')

publish_cmdline(writer_name='latex', description=description)

--- openbm/bin/rst2man.py ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# Author: Engelbert Gruber
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.

"""
man.py
======

This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""

import locale
try:
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage

description = ("Generates plain unix manual documents. " + default_description)

publish_cmdline(writer=manpage.Writer(), description=description)

--- openbm/bin/rst2odt.py ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# $Id: rst2odt.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman
# Copyright: This module has been placed in the public domain.

"""
A front end to the Docutils Publisher, producing OpenOffice documents.
"""

import sys
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline_to_binary, default_description
from docutils.writers.odf_odt import Writer, Reader

description = ('Generates OpenDocument/OpenOffice/ODF documents from '
               'standalone reStructuredText sources. ' + default_description)

writer = Writer()
reader = Reader()
output = publish_cmdline_to_binary(reader=reader, writer=writer,
                                   description=description)

--- openbm/bin/rst2odt_prepstyles.py ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman
# Copyright: This module has been placed in the public domain.

"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""

# Author: Michael Schutte

from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os

NAMESPACES = {
    "style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
    "fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}


def prepstyle(filename):
    zin = zipfile.ZipFile(filename)
    styles = zin.read("styles.xml")
    root = etree.fromstring(styles)

    for el in root.xpath("//style:page-layout-properties",
                         namespaces=NAMESPACES):
        # iterate over a copy of the keys: we delete attributes as we go
        for attr in list(el.attrib):
            if attr.startswith("{%s}" % NAMESPACES["fo"]):
                del el.attrib[attr]

    tempname = mkstemp()
    zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
                           zipfile.ZIP_DEFLATED)
    for item in zin.infolist():
        if item.filename == "styles.xml":
            zout.writestr(item, etree.tostring(root))
        else:
            zout.writestr(item, zin.read(item.filename))

    zout.close()
    zin.close()
    shutil.move(tempname[1], filename)


def main():
    args = sys.argv[1:]
    if len(args) != 1:
        print >> sys.stderr, __doc__
        print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
        sys.exit(1)
    filename = args[0]
    prepstyle(filename)


if __name__ == '__main__':
    main()

# vim:tw=78:sw=4:sts=4:et:

--- openbm/bin/rst2pseudoxml.py ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates pseudo-XML from standalone reStructuredText '
               'sources (for testing purposes). ' + default_description)

publish_cmdline(description=description)

--- openbm/bin/rst2s5.py ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates S5 (X)HTML slideshow documents from standalone '
               'reStructuredText sources. ' + default_description)

publish_cmdline(writer_name='s5', description=description)

--- openbm/bin/rst2xetex.py ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# $Id: rst2xetex.py 7038 2011-05-19 09:12:02Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing XeLaTeX source code.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline

description = ('Generates XeLaTeX documents from standalone reStructuredText '
               'sources. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout). See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')

publish_cmdline(writer_name='xetex', description=description)

--- openbm/bin/rst2xml.py ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates Docutils-native XML from standalone '
               'reStructuredText sources. ' + default_description)

publish_cmdline(writer_name='xml', description=description)

--- openbm/bin/rstpep2html.py ---
#!/home/ocurero/projects/openbatchman/src/bin/python
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates (X)HTML from reStructuredText-format PEP files. ' +
               default_description)

publish_cmdline(reader_name='pep', writer_name='pep_html',
                description=description)
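The rst2* front ends above all delegate to docutils.core; the same conversion can be done in-process. A sketch (assumes docutils is installed; the sample source is made up):

    from docutils.core import publish_string

    html = publish_string(
        source="Title\n=====\n\nSome *emphasised* text.\n",
        writer_name='html',
    )
    print(html.decode('utf-8')[:72])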
a/joblogs/Prueba_plan_1minI55035702019-02-24 19:41:00,138 - INFO - Job Prueba_plan_1minI5503570 started on node foobar 2019-02-24 19:41:00,138 - INFO - Step step0 started 2019-02-24 19:41:06,152 - INFO - Step step1 started 2019-02-24 19:44:00,107 - INFO - Job Prueba_plan_1minI5503570 started on node foobar 2019-02-24 19:44:00,107 - INFO - Step step0 started 2019-02-24 19:44:06,118 - INFO - Step step1 started PK!-openbm/dat a/joblogs/Prueba_plan_1minI77035402019-03-18 19:51:00,099 - INFO - Job Prueba_plan_1minI7703540 started on node foobar 2019-03-18 19:51:00,099 - INFO - Step step0 started 2019-03-18 19:51:06,109 - INFO - Step step1 started PK!淾.openbm/dat a/joblogs/Prueba_plan_1minI78-828592019-03-19 19:51:00,116 - INFO - Job Prueba_plan_1minI78-82859 started on node foobar 2019-03-19 19:51:00,116 - INFO - Step step0 started 2019-03-19 19:51:06,187 - INFO - Step step1 started PK!9/openbm/dat a/joblogs/Prueba_plan_1minI79-1692592019-03-20 19:51:00,132 - INFO - Job Prueba_plan_1minI79-169259 started on node foobar 2019-03-20 19:51:00,132 - INFO - Step step0 started 2019-03-20 19:51:06,160 - INFO - Step step1 started PK!K/openbm/dat a/joblogs/Prueba_plan_1minI80-2556592019-03-21 19:51:00,115 - INFO - Job Prueba_plan_1minI80-255659 started on node foobar 2019-03-21 19:51:00,122 - INFO - Step step0 started 2019-03-21 19:51:06,183 - INFO - Step step1 started PK!D /openbm/dat a/joblogs/Prueba_plan_1minI81-3420592019-03-22 19:51:00,118 - INFO - Job Prueba_plan_1minI81-342059 started on node foobar 2019-03-22 19:51:00,118 - INFO - Step step0 started 2019-03-22 19:51:06,145 - INFO - Step step1 started PK!V/openbm/dat a/joblogs/Prueba_plan_1minI82-4284592019-03-23 19:51:00,108 - INFO - Job Prueba_plan_1minI82-428459 started on node foobar 2019-03-23 19:51:00,108 - INFO - Step step0 started 2019-03-23 19:51:06,161 - INFO - Step step1 started PK![Ͽ/openbm/dat a/joblogs/Prueba_plan_1minI83-5148592019-03-24 19:51:00,114 - INFO - Job Prueba_plan_1minI83-514859 started on node foobar 2019-03-24 19:51:00,114 - INFO - Step step0 started 2019-03-24 19:51:06,171 - INFO - Step step1 started PK!#ؿ/openbm/dat a/joblogs/Prueba_plan_1minI84-6012592019-03-25 19:51:00,068 - INFO - Job Prueba_plan_1minI84-601259 started on node foobar 2019-03-25 19:51:00,068 - INFO - Step step0 started 2019-03-25 19:51:06,104 - INFO - Step step1 started PK!E6/openbm/dat a/joblogs/Prueba_plan_1minI85-6876592019-03-26 19:51:00,075 - INFO - Job Prueba_plan_1minI85-687659 started on node foobar 2019-03-26 19:51:00,075 - INFO - Step step0 started 2019-03-26 19:51:06,113 - INFO - Step step1 started PK!CF.openbm/dat a/joblogs/Prueba_plan_2minI105086402019-04-15 19:26:00,098 - INFO - Job Prueba_plan_2minI10508640 started on node foobar 2019-04-15 19:26:00,098 - INFO - Step step0 started 2019-04-15 19:26:06,122 - INFO - Step step1 started PK!_Ծ.openbm/dat a/joblogs/Prueba_plan_2minI105087602019-04-15 19:24:00,092 - INFO - Job Prueba_plan_2minI10508760 started on node foobar 2019-04-15 19:24:00,092 - INFO - Step step0 started 2019-04-15 19:24:06,101 - INFO - Step step1 started PK!Ԍ.openbm/dat a/joblogs/Prueba_plan_2minI105088202019-04-15 19:23:00,093 - INFO - Job Prueba_plan_2minI10508820 started on node foobar 2019-04-15 19:23:00,093 - INFO - Step step0 started 2019-04-15 19:23:06,103 - INFO - Step step1 started PK!:x.openbm/dat a/joblogs/Prueba_plan_2minI105088802019-04-15 19:22:00,123 - INFO - Job Prueba_plan_2minI10508880 started on node foobar 2019-04-15 19:22:00,123 - INFO - Step step0 started 2019-04-15 19:22:06,131 - INFO - Step step1 
started PK!.openbm/dat a/joblogs/Prueba_plan_2minI105090002019-04-15 19:20:00,112 - INFO - Job Prueba_plan_2minI10509000 started on node foobar 2019-04-15 19:20:00,112 - INFO - Step step0 started 2019-04-15 19:20:06,121 - INFO - Step step1 started PK!* .openbm/dat a/joblogs/Prueba_plan_2minI105091202019-04-15 19:18:00,108 - INFO - Job Prueba_plan_2minI10509120 started on node foobar 2019-04-15 19:18:00,108 - INFO - Step step0 started 2019-04-15 19:18:06,126 - INFO - Step step1 started PK!@.openbm/dat a/joblogs/Prueba_plan_2minI105093002019-04-15 19:15:00,111 - INFO - Job Prueba_plan_2minI10509300 started on node foobar 2019-04-15 19:15:00,112 - INFO - Step step0 started 2019-04-15 19:15:06,121 - INFO - Step step1 started PK!%K8.openbm/dat a/joblogs/Prueba_plan_2minI105099602019-04-15 19:04:00,084 - INFO - Job Prueba_plan_2minI10509960 started on node foobar 2019-04-15 19:04:00,084 - INFO - Step step0 started 2019-04-15 19:04:06,100 - INFO - Step step1 started PK!P!.openbm/dat a/joblogs/Prueba_plan_2minI105108602019-04-15 18:49:00,118 - INFO - Job Prueba_plan_2minI10510860 started on node foobar 2019-04-15 18:49:00,119 - INFO - Step step0 started 2019-04-15 18:49:06,148 - INFO - Step step1 started PK! .openbm/dat a/joblogs/Prueba_plan_2minI105111602019-04-15 18:44:00,175 - INFO - Job Prueba_plan_2minI10511160 started on node foobar 2019-04-15 18:44:00,175 - INFO - Step step0 started 2019-04-15 18:44:06,184 - INFO - Step step1 started PK!mL.openbm/dat a/joblogs/Prueba_plan_2minI410090002019-02-10 19:04:00,065 - INFO - Job Prueba_plan_2minI4100900 started on node foobar 2019-02-10 19:04:00,065 - INFO - Step step0 started 2019-02-10 19:04:06,076 - INFO - Step step1 started PK!=.openbm/dat a/joblogs/Prueba_plan_2minI410114002019-02-10 19:00:00,100 - INFO - Job Prueba_plan_2minI4101140 started on node foobar 2019-02-10 19:00:00,101 - INFO - Step step0 started 2019-02-10 19:00:06,112 - INFO - Step step1 started PK!({.openbm/dat a/joblogs/Prueba_plan_2minI410150002019-02-10 18:54:00,070 - INFO - Job Prueba_plan_2minI4101500 started on node foobar 2019-02-10 18:54:00,070 - INFO - Step step0 started 2019-02-10 18:54:06,079 - INFO - Step step1 started PK!NA.openbm/dat a/joblogs/Prueba_plan_2minI410162002019-02-10 18:52:00,082 - INFO - Job Prueba_plan_2minI4101620 started on node foobar 2019-02-10 18:52:00,082 - INFO - Step step0 started 2019-02-10 18:52:06,094 - INFO - Step step1 started PK!ʽ.openbm/dat a/joblogs/Prueba_plan_2minI410258002019-02-10 18:36:00,091 - INFO - Job Prueba_plan_2minI4102580 started on node foobar 2019-02-10 18:36:00,091 - INFO - Step step0 started 2019-02-10 18:36:06,104 - INFO - Step step1 started PK!PC.openbm/dat a/joblogs/Prueba_plan_2minI410282002019-02-10 18:32:00,086 - INFO - Job Prueba_plan_2minI4102820 started on node foobar 2019-02-10 18:32:00,086 - INFO - Step step0 started 2019-02-10 18:32:06,097 - INFO - Step step1 started PK!̇.openbm/dat a/joblogs/Prueba_plan_2minI410294002019-02-10 18:30:00,087 - INFO - Job Prueba_plan_2minI4102940 started on node foobar 2019-02-10 18:30:00,088 - INFO - Step step0 started 2019-02-10 18:30:06,097 - INFO - Step step1 started PK!>XNϽ.openbm/dat a/joblogs/Prueba_plan_2minI410306002019-02-10 18:28:00,085 - INFO - Job Prueba_plan_2minI4103060 started on node foobar 2019-02-10 18:28:00,085 - INFO - Step step0 started 2019-02-10 18:28:06,095 - INFO - Step step1 started PK!|E.openbm/dat a/joblogs/Prueba_plan_2minI410318002019-02-10 18:26:00,084 - INFO - Job Prueba_plan_2minI4103180 started on node foobar 2019-02-10 18:26:00,084 - INFO - 
Step step0 started 2019-02-10 18:26:06,097 - INFO - Step step1 started PK!7މ.openbm/dat a/joblogs/Prueba_plan_2minI410330002019-02-10 18:24:00,057 - INFO - Job Prueba_plan_2minI4103300 started on node foobar 2019-02-10 18:24:00,057 - INFO - Step step0 started PK!QK.openbm/dat a/joblogs/Prueba_plan_2minI410390002019-02-10 18:14:00,070 - INFO - Job Prueba_plan_2minI4103900 started on node foobar 2019-02-10 18:14:00,070 - INFO - Step step0 started 2019-02-10 18:14:06,082 - INFO - Step step1 started PK!ْC.openbm/dat a/joblogs/Prueba_plan_2minI410450002019-02-10 18:04:00,089 - INFO - Job Prueba_plan_2minI4104500 started on node foobar 2019-02-10 18:04:00,089 - INFO - Step step0 started 2019-02-10 18:04:06,100 - INFO - Step step1 started PK!Mb.openbm/dat a/joblogs/Prueba_plan_2minI410462002019-02-10 18:02:00,056 - INFO - Job Prueba_plan_2minI4104620 started on node foobar 2019-02-10 18:02:00,056 - INFO - Step step0 started PK!)UU.openbm/dat a/joblogs/Prueba_plan_2minI410486002019-02-10 17:58:00,057 - INFO - Job Prueba_plan_2minI4104860 started on node foobar PK!U>.openbm/dat a/joblogs/Prueba_plan_2minI431884002019-02-12 14:05:00,070 - INFO - Job Prueba_plan_2minI4318840 started on node foobar 2019-02-12 14:05:00,071 - INFO - Step step0 started 2019-02-12 14:05:06,083 - INFO - Step step1 started PK!-openbm/dat a/joblogs/Prueba_plan_2minI55029402019-02-24 18:30:00,051 - INFO - Job Prueba_plan_2minI5502940 started on node foobar 2019-02-24 18:30:00,051 - INFO - Step step0 started 2019-02-24 18:30:06,060 - INFO - Step step1 started PK!E$@-openbm/dat a/joblogs/Prueba_plan_2minI55030002019-02-24 18:29:00,062 - INFO - Job Prueba_plan_2minI5503000 started on node foobar 2019-02-24 18:29:00,063 - INFO - Step step0 started 2019-02-24 18:29:06,075 - INFO - Step step1 started PK!y-openbm/dat a/joblogs/Prueba_plan_2minI55031202019-02-24 18:27:00,060 - INFO - Job Prueba_plan_2minI5503120 started on node foobar 2019-02-24 18:27:00,060 - INFO - Step step0 started 2019-02-24 18:27:06,084 - INFO - Step step1 started PK!'~nn-openbm/dat a/joblogs/Prueba_plan_2minI55035402019-02-24 19:17:00,122 - INFO - Job Prueba_plan_2minI5503540 started on node foobar 2019-02-24 19:17:00,122 - INFO - Step step0 started 2019-02-24 19:17:06,148 - INFO - Step step1 started 2019-02-24 19:23:00,131 - INFO - Job Prueba_plan_2minI5503540 started on node foobar 2019-02-24 19:23:00,131 - INFO - Step step0 started 2019-02-24 19:23:06,140 - INFO - Step step1 started 2019-02-24 19:28:00,150 - INFO - Job Prueba_plan_2minI5503540 started on node foobar 2019-02-24 19:28:00,150 - INFO - Step step0 started 2019-02-24 19:28:06,160 - INFO - Step step1 started 2019-02-24 19:33:00,147 - INFO - Job Prueba_plan_2minI5503540 started on node foobar 2019-02-24 19:33:00,148 - INFO - Step step0 started 2019-02-24 19:33:06,157 - INFO - Step step1 started 2019-02-24 19:47:00,189 - INFO - Job Prueba_plan_2minI5503540 started on node foobar 2019-02-24 19:47:00,189 - INFO - Step step0 started 2019-02-24 19:47:06,205 - INFO - Step step1 started 2019-02-24 19:51:00,198 - INFO - Job Prueba_plan_2minI5503540 started on node foobar 2019-02-24 19:51:00,198 - INFO - Step step0 started 2019-02-24 19:51:06,241 - INFO - Step step1 started PK!~zz-openbm/dat a/joblogs/Prueba_plan_2minI55035702019-02-24 19:41:00,168 - INFO - Job Prueba_plan_2minI5503570 started on node foobar 2019-02-24 19:41:00,169 - INFO - Step step0 started 2019-02-24 19:41:06,178 - INFO - Step step1 started 2019-02-24 19:44:00,134 - INFO - Job Prueba_plan_2minI5503570 started on node foobar 2019-02-24 
19:44:00,134 - INFO - Step step0 started 2019-02-24 19:44:06,143 - INFO - Step step1 started PK!+B*-openbm/dat a/joblogs/Prueba_plan_2minI77035402019-03-18 19:51:00,129 - INFO - Job Prueba_plan_2minI7703540 started on node foobar 2019-03-18 19:51:00,129 - INFO - Step step0 started 2019-03-18 19:51:06,138 - INFO - Step step1 started PK!|G.openbm/dat a/joblogs/Prueba_plan_2minI78-828592019-03-19 19:51:00,218 - INFO - Job Prueba_plan_2minI78-82859 started on node foobar 2019-03-19 19:51:00,219 - INFO - Step step0 started 2019-03-19 19:51:06,229 - INFO - Step step1 started PK!s/openbm/dat a/joblogs/Prueba_plan_2minI79-1692592019-03-20 19:51:00,184 - INFO - Job Prueba_plan_2minI79-169259 started on node foobar 2019-03-20 19:51:00,184 - INFO - Step step0 started 2019-03-20 19:51:06,191 - INFO - Step step1 started PK!9/openbm/dat a/joblogs/Prueba_plan_2minI80-2556592019-03-21 19:51:00,200 - INFO - Job Prueba_plan_2minI80-255659 started on node foobar 2019-03-21 19:51:00,200 - INFO - Step step0 started 2019-03-21 19:51:06,210 - INFO - Step step1 started PK!PQ/openbm/dat a/joblogs/Prueba_plan_2minI81-3420592019-03-22 19:51:00,169 - INFO - Job Prueba_plan_2minI81-342059 started on node foobar 2019-03-22 19:51:00,169 - INFO - Step step0 started 2019-03-22 19:51:06,177 - INFO - Step step1 started PK!m/openbm/dat a/joblogs/Prueba_plan_2minI82-4284592019-03-23 19:51:00,168 - INFO - Job Prueba_plan_2minI82-428459 started on node foobar 2019-03-23 19:51:00,168 - INFO - Step step0 started 2019-03-23 19:51:06,191 - INFO - Step step1 started PK!o/openbm/dat a/joblogs/Prueba_plan_2minI83-5148592019-03-24 19:51:00,184 - INFO - Job Prueba_plan_2minI83-514859 started on node foobar 2019-03-24 19:51:00,184 - INFO - Step step0 started 2019-03-24 19:51:06,195 - INFO - Step step1 started PK!h,ѿ/openbm/dat a/joblogs/Prueba_plan_2minI84-6012592019-03-25 19:51:00,113 - INFO - Job Prueba_plan_2minI84-601259 started on node foobar 2019-03-25 19:51:00,114 - INFO - Step step0 started 2019-03-25 19:51:06,129 - INFO - Step step1 started PK!Q/openbm/dat a/joblogs/Prueba_plan_2minI85-6876592019-03-26 19:51:00,128 - INFO - Job Prueba_plan_2minI85-687659 started on node foobar 2019-03-26 19:51:00,128 - INFO - Step step0 started 2019-03-26 19:51:06,149 - INFO - Step step1 started PK!4{-openbm/dat a/joblogs/Prueba_plan_2minR97049812019-04-07 20:26:58,900 - INFO - Job Prueba_plan_2minR9704981 started on node foobar 2019-04-07 20:26:58,900 - INFO - Step step0 started 2019-04-07 20:27:04,913 - INFO - Step step1 started PK!-openbm/dat a/joblogs/Prueba_plan_2minR97052082019-04-07 20:23:11,770 - INFO - Job Prueba_plan_2minR9705208 started on node foobar 2019-04-07 20:23:11,770 - INFO - Step step0 started 2019-04-07 20:23:17,784 - INFO - Step step1 started PK!FJ-openbm/dat a/joblogs/Prueba_plan_2minR97055652019-04-07 20:17:14,665 - INFO - Job Prueba_plan_2minR9705565 started on node foobar 2019-04-07 20:17:14,665 - INFO - Step step0 started 2019-04-07 20:17:20,679 - INFO - Step step1 started PK!aS-openbm/dat a/joblogs/Prueba_plan_2minR97061762019-04-07 20:07:04,743 - INFO - Job Prueba_plan_2minR9706176 started on node foobar 2019-04-07 20:07:04,744 - INFO - Step step0 started 2019-04-07 20:07:10,763 - INFO - Step step1 started PK!T-openbm/dat a/joblogs/Prueba_plan_2minR97064052019-04-07 20:03:15,476 - INFO - Job Prueba_plan_2minR9706405 started on node foobar 2019-04-07 20:03:15,476 - INFO - Step step0 started 2019-04-07 20:03:21,493 - INFO - Step step1 started PK!c-openbm/dat a/joblogs/Prueba_plan_2minR97066722019-04-07 19:58:47,515 - 
INFO - Job Prueba_plan_2minR9706672 started on node foobar 2019-04-07 19:58:47,515 - INFO - Step step0 started 2019-04-07 19:58:53,530 - INFO - Step step1 started PK!L ,openbm/dat a/joblogs/Prueba_plan_yaD105111602019-04-15 18:44:00,226 - INFO - Job Prueba_plan_yaD10511160 started on node foobar 2019-04-15 18:44:00,226 - INFO - Step step0 started 2019-04-15 18:44:06,235 - INFO - Step step1 started PK!,P+openbm/dat a/joblogs/Prueba_plan_yaD77035402019-03-18 19:51:00,161 - INFO - Job Prueba_plan_yaD7703540 started on node foobar 2019-03-18 19:51:00,161 - INFO - Step step0 started 2019-03-18 19:51:06,175 - INFO - Step step1 started PK!Ĉ ,openbm/dat a/joblogs/Prueba_plan_yaI105086402019-04-15 19:26:00,130 - INFO - Job Prueba_plan_yaI10508640 started on node foobar 2019-04-15 19:26:00,130 - INFO - Step step0 started 2019-04-15 19:26:06,146 - INFO - Step step1 started PK!,openbm/dat a/joblogs/Prueba_plan_yaI105087602019-04-15 19:24:00,127 - INFO - Job Prueba_plan_yaI10508760 started on node foobar 2019-04-15 19:24:00,127 - INFO - Step step0 started 2019-04-15 19:24:06,136 - INFO - Step step1 started PK!Ǽ,openbm/dat a/joblogs/Prueba_plan_yaI105088202019-04-15 19:23:00,123 - INFO - Job Prueba_plan_yaI10508820 started on node foobar 2019-04-15 19:23:00,123 - INFO - Step step0 started 2019-04-15 19:23:06,136 - INFO - Step step1 started PK!@,openbm/dat a/joblogs/Prueba_plan_yaI105088802019-04-15 19:22:00,156 - INFO - Job Prueba_plan_yaI10508880 started on node foobar 2019-04-15 19:22:00,156 - INFO - Step step0 started 2019-04-15 19:22:06,164 - INFO - Step step1 started PK!o,openbm/dat a/joblogs/Prueba_plan_yaI105090002019-04-15 19:20:00,149 - INFO - Job Prueba_plan_yaI10509000 started on node foobar 2019-04-15 19:20:00,149 - INFO - Step step0 started 2019-04-15 19:20:06,158 - INFO - Step step1 started PK!ʼ,openbm/dat a/joblogs/Prueba_plan_yaI105091202019-04-15 19:18:00,138 - INFO - Job Prueba_plan_yaI10509120 started on node foobar 2019-04-15 19:18:00,139 - INFO - Step step0 started 2019-04-15 19:18:06,163 - INFO - Step step1 started PK!fؼ,openbm/dat a/joblogs/Prueba_plan_yaI105093002019-04-15 19:15:00,144 - INFO - Job Prueba_plan_yaI10509300 started on node foobar 2019-04-15 19:15:00,144 - INFO - Step step0 started 2019-04-15 19:15:06,162 - INFO - Step step1 started PK!^S,openbm/dat a/joblogs/Prueba_plan_yaI105099602019-04-15 19:04:00,120 - INFO - Job Prueba_plan_yaI10509960 started on node foobar 2019-04-15 19:04:00,120 - INFO - Step step0 started 2019-04-15 19:04:06,137 - INFO - Step step1 started PK!|7& ,openbm/dat a/joblogs/Prueba_plan_yaI105108602019-04-15 18:49:00,154 - INFO - Job Prueba_plan_yaI10510860 started on node foobar 2019-04-15 18:49:00,154 - INFO - Step step0 started 2019-04-15 18:49:06,188 - INFO - Step step1 started PK!i,openbm/dat a/joblogs/Prueba_plan_yaI105111602019-04-15 18:44:00,286 - INFO - Job Prueba_plan_yaI10511160 started on node foobar 2019-04-15 18:44:00,287 - INFO - Step step0 started 2019-04-15 18:44:06,296 - INFO - Step step1 started PK!TT+openbm/dat a/joblogs/Prueba_plan_yaR56251392019-02-25 13:51:01,108 - INFO - Job Prueba_plan_yaR5625139 started on node foobar 2019-02-25 13:51:01,108 - INFO - Step step0 started 2019-02-25 13:51:07,122 - INFO - Step step1 started PK!m+openbm/dat a/joblogs/Prueba_plan_yaR56252042019-02-25 13:49:56,376 - INFO - Job Prueba_plan_yaR5625204 started on node foobar 2019-02-25 13:49:56,376 - INFO - Step step0 started 2019-02-25 13:50:02,392 - INFO - Step step1 started PK!-$+openbm/dat a/joblogs/Prueba_plan_yaR56252912019-02-25 
==> openbm/data/jobs.db (pydblite pickle; binary headers omitted) <==

The job repository holds one record per job definition with the fields
name, spec and exec_time (a list of past execution times), indexed by
name. Only the YAML spec strings are cleanly recoverable from the dump;
their comments are translated here from Spanish:

# Test that this is fine
name: Prueba_plan_diaria
node: foobar
jobset:
  - hola
owner: /root
schedule:
  - second: 0   # This here is fine
prereqs:
  - name: 'Prueba1.%TODAY%'
steps:
  - cmd: 'sleep 6'
  - cmd: 'true'
---
# Test that this is fine
name: Prueba_plan_2min
node: foobar
jobset:
  - hola
owner: /root
schedule:
  - minute: '*/1'   # This here is fine
steps:
  - cmd: 'sleep 6'
  - cmd: 'false'
---
# Test that this is fine
name: Prueba_plan_1min
node: foobar
jobset:
  - hola
owner: /root
schedule:
  - minute: '*/1'   # This here is fine
steps:
  - cmd: 'sleep 6'
  - cmd: 'true'
---
# Test that this is fine
name: Prueba_plan_ya
node: foobar
jobset:
  - hola
owner: /root
schedule:
  - second: 0   # This here is fine
steps:
  - cmd: 'sleep 6'
  - cmd: 'true'
    abdcond: 'steps.step1.exception isnot None'

==> openbm/data/jobsets.db (pydblite pickle; binary) <==

Job set records with the fields name, enabled, starttime, endtime,
timezone, totalslots and usedslots, indexed by name; the dump contains
no readable rows.
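The scheduler round-trips these specs through yaml.load (see
openbm/major/__init__.py below). A minimal sketch of reading one back
out; safe_load is my substitution, the code base calls yaml.load with
no explicit loader:

import yaml

spec = """
name: Prueba_plan_ya
owner: /root
schedule:
  - second: 0
steps:
  - cmd: 'sleep 6'
"""
jobdict = yaml.safe_load(spec)
assert jobdict['name'] == 'Prueba_plan_ya'
assert jobdict['schedule'][0]['second'] == 0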
==> openbm/data/list.db (pydblite pickle; binary headers omitted) <==

The job list holds one record per scheduled job instance, keyed by id
(e.g. Prueba_plan_diariaI10508640, Prueba_plan_2minI10508640,
Prueba_plan_yaI10508640, Prueba_plan_1minI10508340..I10508640). Each
jobdata dict carries jobname, prio, owner, est_sta_time, est_exec_time,
scheduler, prereq (e.g. 'Prueba1.%TODAY%'), provides (e.g.
'Prueba_plan_diaria.D190415T172600'), source (the step list) and runs
(pendulum sta_time/sto_time, node 'foobar', rc values such as
JobAbend(1), END_OK or ABDCOND, and statuses ABD and FOK). The pickled
binary itself is not reproduced here.
==> openbm/data/localjobs.db (pydblite pickle; binary headers omitted) <==

One record per (jobid, step, executor) for every step executed locally,
e.g. (Prueba1R18976445, step0, cmd) and (Prueba1R18976445, step1, cmd),
indexed by jobid. The pickled binary is not reproduced here.

==> openbm/data/network.conf.test <==

[NETWORK]
server=major.dd.com

==> openbm/dummy_module.py <==

class DummyPlugin():
    OSKY = 'THEBEST'

==> openbm/exceptions.py <==

import sys


class InternalSchedulerError(Exception):

    def __init__(self, ee):
        self.ee = ee
        __, __, self.tb = sys.exc_info()

    def re_raise(self):
        raise self.ee.with_traceback(self.tb)


class SchedulerError(BaseException):
    pass


class ScheduleError(SchedulerError):
    pass


class JobException(Exception):

    def __str__(self):
        return f'{self.__class__.__name__}({self.__cause__})'


class JobError(JobException):
    """An error prevented the execution of the job"""


class JobAbend(JobException):
    """A job abnormally ended"""


class NoSuchSchedule(ScheduleError):
    pass


class JobStoreError(SchedulerError):
    pass


class NoSuchJob(JobStoreError):
    pass


class DuplicateJob(JobStoreError):
    pass


class NoSuchJobSet(JobStoreError):
    pass


class DuplicateJobSet(JobStoreError):
    pass


class NoSuchNode(JobStoreError):
    pass


class DuplicateNode(JobStoreError):
    pass


class TriggerError(SchedulerError):
    pass
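InternalSchedulerError exists so an exception raised in the scheduler
process can cross the pipe to the web process and be re-raised with its
original traceback (see _run_om in openbm/major/__init__.py below,
which installs tblib's pickling support for exactly this). A minimal
sketch of the intended pattern:

try:
    1 / 0
except BaseException as e:
    wrapped = InternalSchedulerError(e)

# ...after `wrapped` travels over a pipe to the other process...
try:
    wrapped.re_raise()
except ZeroDivisionError:
    pass  # the original exception type and traceback are preserved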
==> openbm/major/DAG_sqlalchemy.jobview.py <==

from datetime import timedelta

import networkx as nx
from networkx.algorithms.dag import ancestors, descendants
from sqlalchemy import (create_engine, Column, ForeignKey, select, orm,
                        DateTime, Unicode, Boolean, CHAR, Integer,
                        PickleType)
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship, sessionmaker, scoped_session
from sqlalchemy.orm.exc import FlushError, NoResultFound
from sqlalchemy.pool import StaticPool
from sqlalchemy_querybuilder import Filter
import pendulum

# from openbm.major.utils import decode_jobid, dsndate_fmt, dsntime_fmt

Base = declarative_base()


class Job(Base):
    __tablename__ = 'jobs'
    jobid = Column('jobid', Unicode(16), primary_key=True, nullable=False)
    scheduler = Column('sched', CHAR(1), nullable=False)
    status = Column('status', Unicode(3), index=True)
    jobname = Column('jobname', Unicode(16))
    provider = Column('provider', Unicode(512))
    est_sta_time = Column('est_sta_time', DateTime)
    est_exec_time = Column('est_exec_time', Integer)
    prio = Column('prio', Integer)
    wait = Column('wait', Unicode(16))
    owner = Column('owner', Unicode(64))
    steps = Column('steps', PickleType)
    runs = relationship('JobRun', lazy='subquery',
                        cascade='save-update, merge, delete')
    prereq = relationship('PreReq', lazy='subquery',
                          cascade='save-update, merge, delete')
    # NOTE: this rebinds the 'provider' Column defined above; the
    # relationship is what remains visible on the class.
    provider = relationship('Provider', lazy='subquery',
                            cascade='save-update, merge, delete')

    @orm.reconstructor
    def init_on_load(self):
        print(type(self.higher_neighbors()))
        print(type(self.lower_neighbors()))

    @hybrid_property
    def runid(self):
        return self.prio

    @runid.expression
    def runid1(cls):
        print(cls)
        # return select([JobRun.runid]).where(
        #     cls.jobid == JobRun.jobid).as_scalar()
        # return self.runs[0].runid if self.runs else None

    @hybrid_property
    def providesa(self):
        return self.prio

    @hybrid_property
    def rc(self):
        return self.runs[0].rc if self.runs else None

    @hybrid_property
    def node(self):
        return self.runs[0].node if self.runs else None

    @node.expression
    def node(cls):
        return select([JobRun.node]).where(
            cls.jobid == JobRun.jobid).as_scalar()

    @hybrid_property
    def sta_time(self):
        return self.runs[0].sta_time if self.runs else None

    @hybrid_property
    def sto_time(self):
        return self.runs[0].sto_time if self.runs else None

    @hybrid_property
    def runtime(self):
        return self.runs[0].runtime if self.runs else 0

    def __json__(self, request):
        tz = request.registry.settings['timezone']
        # print(self.provider)
        # for r in self.provider:
        #     print('HOLLA')
        #     print(r)
        #     print(r.ancestors())
        return {'jobid': self.jobid,
                'scheduler': self.scheduler,
                'jobname': self.jobname,
                'prio': self.prio,
                'wait': self.wait,
                'owner': self.owner,
                'node': self.node,
                'runid': self.runid,
                'status': self.status,
                'rc': self.rc,
                'est_exec_time': self.est_exec_time or 0,
                'est_sta_time': pendulum.instance(self.est_sta_time,
                                                  tz).isoformat()
                if self.est_sta_time else None,
                'sta_time': pendulum.instance(self.sta_time,
                                              tz).isoformat()
                if self.sta_time else None,
                # NOTE: a bare timedelta(n) counts days; seconds may
                # have been intended for est_exec_time here
                'est_sto_time': (pendulum.instance(self.est_sta_time, tz)
                                 + timedelta(self.est_exec_time)
                                 ).isoformat()
                if self.est_sta_time else None,
                'sto_time': pendulum.instance(self.sto_time,
                                              tz).isoformat()
                if self.sto_time else None,
                'runtime': self.runtime,
                'runs': self.runs,
                'prereq': self.prereq,
                'provider': self.provider,
                }
class JobRun(Base):
    __tablename__ = 'run'
    jobid = Column('jobid', Unicode(16), ForeignKey('jobs.jobid'),
                   primary_key=True)
    runid = Column('runid', Integer)
    # status = Column('status', Unicode(3), nullable=False, index=True)
    node = Column('node', Unicode(64))
    rc = Column('rc', CHAR(6))
    # est_sta_time = Column('est_sta_time', DateTime)
    sta_time = Column('sta_time', DateTime, index=True)
    # est_sto_time = Column('est_sto_time', DateTime)
    sto_time = Column('sto_time', DateTime, index=True)

    @hybrid_property
    def runtime(self):
        if not self.sto_time or not self.sta_time:
            return None
        else:
            return (self.sto_time - self.sta_time).seconds / 60

    def __json__(self, request):
        tz = request.registry.settings['timezone']
        return {'jobid': self.jobid,
                'runid': self.runid,
                'node': self.node,
                # 'status': self.status,
                'rc': self.rc,
                # 'est_sta_time': pendulum.instance(self.est_sta_time,
                #                                   tz).isoformat() if
                # self.est_sta_time else None,
                'sta_time': pendulum.instance(self.sta_time,
                                              tz).isoformat()
                if self.sta_time else None,
                # 'est_sto_time': self.est_sto_time.isoformat() if
                # self.est_sto_time else None,
                # 'sto_time': self.sto_time.isoformat() if self.sto_time
                # else None,
                }


class PreReq(Base):
    __tablename__ = 'prereq'
    jobid = Column('jobid', Unicode(16), ForeignKey('jobs.jobid'),
                   primary_key=True)
    name = Column('name', Unicode(512), primary_key=True)
    cond = Column('cond', Boolean)
    status = Column('status', Boolean)

    def __json__(self, request):
        return {}


class Provider(Base):
    __tablename__ = 'provider'
    name = Column('name', Unicode(512), primary_key=True)
    jobid = Column('jobid', Unicode(16), ForeignKey('jobs.jobid'))
    job = relationship('Job', lazy='subquery')

    def __json__(self, request):
        return {'jobid': self.jobid,
                'ancestors': 't',
                'provider': self.name}


class JobDep(Base):
    __tablename__ = 'jobdep'
    waiter = Column('waiter', Unicode(16), ForeignKey('jobs.jobid'),
                    primary_key=True)
    resolver = Column('resolver', Unicode(16), ForeignKey('jobs.jobid'),
                      primary_key=True)
    _descendants = relationship(Job, primaryjoin=waiter == Job.jobid,
                                backref='descendants', lazy='joined')
    _ancestors = relationship(Job, primaryjoin=resolver == Job.jobid,
                              backref='ancestors', lazy='joined')

    def __init__(self, waiter, resolver):
        self.waiter = waiter.jobid
        self.resolver = resolver.jobid
        # TODO: use multi-provides
        # self.waiter = waiter.name
        # self.resolver = resolver.name

    def __json__(self, request):
        # JobDep has no 'name' or 'status' attribute; 'waiter' was
        # presumably meant, so return that instead of raising
        return {'name': self.waiter,
                'resolver': self.resolver,
                'status': None,
                }


engine = create_engine('sqlite://',
                       connect_args={'check_same_thread': False},
                       poolclass=StaticPool, echo=False)
Base.metadata.create_all(engine)
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)
query = Session.query()
jobfilter = Filter({'jobs': Job}, query)


def load(joblist, dayshift, tz):
    for job in joblist:
        insert_job(job['jobdata'].copy(), dayshift, tz)
    # print('@------@-----@')
    # for i in Session.query(JobDep).all():
    #     print(i.waiter + '< - >' + i.resolver)


def insert_job(jobdata, dayshift, tz):
    # schedule = decode_jobid(jobdata['jobid'], dayshift, tz)
    for runid, rundata in enumerate(jobdata['runs']):
        Session.add(JobRun(runid=runid, **rundata))
    providers = []
    for provider in jobdata['provider']:
        provider_obj = Provider(jobid=jobdata['jobid'], name=provider)
        Session.add(provider_obj)
        providers.append(provider_obj)
    for prereq in jobdata['prereq']:
        name = prereq['name']
        if 'type' not in prereq or prereq['type'] == 'job':
            dep = Session.query(Provider).filter_by(name=name).first()
            for provider in providers:
                Session.add(JobDep(provider, dep))
        Session.add(PreReq(jobid=jobdata['jobid'], name=name,
                           cond=prereq.get('cond', False), status=False))
    del jobdata['runs']
    del jobdata['provider']
    del jobdata['prereq']
    Session.add(Job(**jobdata))
    Session.commit()


def insert_run(jobid, runid, rundata):
    Session.add(JobRun(jobid=jobid, runid=runid, ))
    try:
        Session.commit()
    except (IntegrityError, FlushError):
        Session.rollback()


def modify(jobid, runid, jobdata, rundata):
    job = Session.query(Job).filter(Job.jobid == jobid).one()
    for key in jobdata:
        setattr(job, key, jobdata[key])
    try:
        # two filter criteria, not `a == x and b == y`: a bare Python
        # `and` would silently drop the first condition when building
        # the SQL expression
        run = Session.query(JobRun).filter(JobRun.jobid == jobid,
                                           JobRun.runid == runid).one()
        for key in rundata:
            setattr(run, key, rundata[key])
    except NoResultFound:
        insert_run(jobid, runid, rundata.copy())
    Session.commit()


def filter(filters):
    if 'condition' not in filters:
        filters['condition'] = 'AND'
    if not filters['rules']:
        # If no filters are passed, force at least one so there's a query
        filters['rules'].append({'field': 'jobs.jobname',
                                 'operator': 'contains',
                                 'value': ''})
    hola = jobfilter.querybuilder(filters).all()
    print(hola)
    print(hola[0].lower_neighbors())
    print(hola[0].ancestors)
    print(hola[0].descendants)
    return hola
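The filters argument follows the jQuery-QueryBuilder shape consumed by
sqlalchemy_querybuilder: a condition plus a list of rules. Both this
module and openbm/major/jobview.py below expose the same filter() entry
point; a sketch of querying for every abended job (the status value is
my example, the field/operator names are the ones used in this code):

abended = filter({'condition': 'AND',
                  'rules': [{'field': 'jobs.status',
                             'operator': 'equal',
                             'value': 'ABD'}]})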
==> openbm/major/__init__.py <==

#!/usr/bin/env python
#
# Copyright (C) 2016 Oscar Curero
#
# This file is part of OpenBatchManager.
#
# OpenBatchManager free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenBatchManager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenBatchmanager. If not, see <http://www.gnu.org/licenses/>.
#

import asyncio
# from collections import OrderedDict
from datetime import datetime
import logging
import pickle
import time  # used by ClusterMajorScheduler._configure below
import pytz
import re
# import sys

from aioprocessing import AioPipe, AioEvent
from multiprocessing import Process
from apscheduler.jobstores.base import JobLookupError
from apscheduler.jobstores.memory import MemoryJobStore
# from apscheduler.schedulers.asyncio import AsyncIOScheduler ?
# from apscheduler.executors.pool import ProcessPoolExecutor ?
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger
import networkx as nx
from networkx.algorithms.dag import descendants, ancestors
import pendulum
import tblib.pickling_support
import websockets
import yaml

from openbm.plugins.load import (load_notifiers, load_auth_backend, )
from openbm.minor import MinorScheduler
# from openbm.major.catalogs import (SimpleCatalog)
from openbm.major.catalogs import (ClusterCatalog, SimpleCatalog)
# from openbm.major.nodestores import (SimpleNodeStore)
from openbm.major.exceptions import (DuplicateJob, InternalSchedulerError,
                                     NoSuchJob, )
# from openbm.major.executors import NodePoolExecutor
from openbm.major.nodestores import (RaftNodeStore, SimpleNodeStore)
# from openbm.major import dist_logging
# from openbm.major.triggers import OBMTrigger
from openbm.major.utils import dsndate_fmt, dsntime_fmt
import openbm.major.jobview
from openbm.major.webapps import webserver

tblib.pickling_support.install()
loop = asyncio.get_event_loop()
logger = logging.getLogger(__name__)


class MajorScheduler(MinorScheduler):
    """Base class for Major scheduler"""

    # matches schedule values that describe more than one firing
    # ('*', '*/n' or ranges) as opposed to a single fixed value
    cron_many = re.compile(r'\*|/|-')

    def __init__(self, config):
        # self.jobview_queue = []
        self.schid = config['major'].pop('default_scheduler', 1)
        self.tz = config['major']['timezone']
        logger.info(f'starting in {self.tz} timezone')
        self.ws_clients = dict()
        self.locks = dict()
        # self.node_pool = NodePoolExecutor(self, self.ws_clients)
        #                                   config['jobstores']['major'])
        super().__init__(config)
        user_dayshift = config['major']['reload_time'].split(':')
        self.dayshift = pendulum.now(self.tz).replace(
            hour=int(user_dayshift[0]),
            minute=int(user_dayshift[1]),
            second=int(user_dayshift[2]))
        if self.dayshift < pendulum.now(self.tz):
            self.dayshift = self.dayshift.add(days=1)
        # self.major = self._jobstores['major']
        self.dep_mgr = nx.DiGraph()
        for jobset in self.catalog.get_jobsets():
            self.dep_mgr.add_node(f'JOBSET_{jobset.name}')
            logger.debug(f'Created jobset {jobset.name} dependency')
        super().add_job(self._update_scheduler, 'cron', jobstore='int',
                        hour=int(user_dayshift[0]),
                        minute=int(user_dayshift[1]),
                        second=int(user_dayshift[2]))
        self.notifiers = load_notifiers(config)
        self.auth = load_auth_backend(config)
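    # Worked example: with reload_time '21:50:00' (a value inferred from
    # the sample jobids in the data directory, not read from any config
    # here) and "now" at 2019-04-15 19:26:00, dayshift stays at
    # 2019-04-15 21:50:00; had "now" already passed 21:50, the rollover
    # above would push dayshift to the 16th.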
    async def start(self, init):
        openbm.major.jobview.load(self.catalog.list, self.dayshift,
                                  self.tz)
        om_start = AioEvent()
        self.om_task = asyncio.ensure_future(self._run_om(self.config,
                                                          om_start))
        logger.debug('Waiting for Operation Manager to become active')
        await om_start.coro_wait()
        server = websockets.serve(self._ws_handler,
                                  self.config['major']['node_addr'],
                                  self.config['major']['node_port'])
        self.ws_server = await server
        await super().start()
        if init:
            await self._update_interval_jobs()
            logger.debug('interval jobs initialization complete')
            await self._update_scheduler()
        logger.warning('scheduler initialization complete')
        self.add_job(self._archive_jobview, trigger='interval',
                     jobstore='int', minutes=10)
        await self.om_task

    def shutdown(self):
        logger.info('Waiting for jobs to finish')
        super().shutdown()

    async def stop(self):
        self.om_task.cancel()

    def _configure(self, config):
        # threads = asint(config.pop('threads', 50))
        config['jobstores'] = {'major': MemoryJobStore()}
        # config['executors'] = {'node': self.node_pool}
        super()._configure(config)

    async def _ws_handle(self, major_ready):
        major_ready.set()

    async def _run_om(self, config, event):
        logger.debug('Launching OM in another process')
        ep_local, ep_remote = AioPipe()
        log_local, log_remote = AioPipe()
        # om_proc = AioProcess(target=webserver.run, daemon=True,
        #                      args=(config, ep_remote, log_remote, event))
        om_proc = Process(target=webserver.run,
                          args=(config, ep_remote, log_remote, event))
        om_proc.daemon = True
        om_proc.start()
        while True:
            try:
                msg = await ep_local.coro_recv()
            except asyncio.CancelledError as ex:
                logger.debug('Stopping OM task')
                om_proc.terminate()
                break
            if msg[0][0] is None:
                om_proc.terminate()
                raise RuntimeError
            try:
                # messages have the shape ((method_path, *args), kwargs);
                # the dotted path is resolved attribute by attribute
                func = self
                for method in msg[0][0].split('.'):
                    func = getattr(func, method)
                reply = func(*msg[0][1:], **msg[1])
                if asyncio.iscoroutine(reply):
                    reply = await reply
            except BaseException as e:
                reply = InternalSchedulerError(e)
            ep_local.send(reply)

    def schedule_job(self, jobname, datetime):
        """Schedule a registered job to the scheduler"""
        job = self.catalog.get_job_repo(jobname)
        jobid = self.create_jobid(job["name"], 'R', datetime)
        jobdata = self.create_job(jobid, 'R', datetime, job)
        return jobdata['jobid']

    def schedule_joboneshot(self, job, datetime):
        """Schedule a one-time job to the scheduler"""
        try:
            self.catalog.get_job_repo(job['name'])
            raise DuplicateJob()
        except NoSuchJob:
            pass
        jobid = self.create_jobid(job["name"], 'R', datetime)
        jobdata = self.create_job(jobid, 'R', datetime, job)
        return jobdata['jobid']

    async def abort_job(self, jobid):
        run = self.catalog.getjob_joblist(jobid)['jobdata']['runs'][-1]
        if run['node'] != self.name:
            pass
        else:
            await super().abort_job(jobid)

    def query_jobview(self, filters):
        return openbm.major.jobview.filter(filters)

    def get_descendants(self, prov_name):
        return list(descendants(self.dep_mgr, prov_name))

    def get_ancestors(self, prov_name):
        try:
            return self.dep_mgr.nodes[list(
                ancestors(self.dep_mgr, prov_name))[0]]
        except IndexError:
            return []
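    # On the far side of the pipe, the web layer exposes the _run_om
    # dispatch as a callable proxy; resources.py below calls e.g.
    #     self.scheduler('catalog.get_jobsets')
    #     self.scheduler('query_jobview', {'rules': [...]})
    # which arrive here as ('catalog.get_jobsets',) and so on, with
    # errors travelling back wrapped in InternalSchedulerError.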
from none" raise openbm.exceptions.ScheduleLookupError(jobname) from None except Exception as e: raise e def cluster_status(self): return False def find_leader(self): return False async def send_notify(self, event, message, group, title=''): recipients = self.auth.group_users(group.split('/')) send = [] for notifier in self.notifiers: if notifier.filter(event): send.append(loop.run_in_executor(None, notifier.notify, recipients, message, title)) await asyncio.gather(*send) def _manage_jobstatus(self, jobid, jobdata, rundata={}): self.catalog.modify_joblist(jobid, jobdata, rundata) openbm.major.jobview.modify(jobid, jobdata, rundata) if jobdata['status'] in ('ABD', 'NDF'): job = self.catalog.getjob_joblist(jobid)['jobdata'] if jobdata['status'] == 'ABD': message = f'Job {jobid} has ended abnormally' title = f'Job {jobid} ended abnormally' elif jobdata['status'] == 'NDF': message = f'Job {jobid} has undefined dependencies' title = f'Job {jobid} has unidefined dependencies' loop.create_task(self.send_notify(jobdata['status'], message, job['owner'], title)) if jobdata['status'] == 'FOK' and jobid[-9] == 'I': jobname = self.catalog.getjob_joblist(jobid)['jobdata']['jobname'] job = self.catalog.get_job_repo(jobname) loop.create_task(self._update_interval_job(job)) async def _ws_handler(self, ws, name): try: name, inits = name.lstrip('/').split('/') int(inits) except ValueError: await ws.close(code=4000) logger.error(f'Invalid URI from {ws.remote_address[0]}') logger.debug(f'New connection from {name} ({ws.remote_address[0]})') if name not in self.ws_clients and name != self.name: self.node_mgr.add_node(name, inits) self.ws_clients[name] = ws logger.info(f'node {name} initialized with {inits} inits') else: await ws.close(code=4001) logger.error(f'Duplicate node: {name}') return while True: try: data = await ws.recv() except websockets.exceptions.ConnectionClosed: self.node_mgr.modify_node(name, status='LST') del(self.ws_clients[name]) break method, args, kwargs, need_response = pickle.loads(data) response = getattr(self, method)(*args, **kwargs) if need_response: rrr = pickle.dumps(response) print(rrr) await ws.send(pickle.dumps(response)) async def _archive_jobview(self): print('JOB ARCHIVING IS RUNNING') async def _update_interval_jobs(self): logger.debug('scheduling jobs for the next 5min') for job in self.catalog.get_all_jobs_repo(): await self._update_interval_job(job) async def _update_interval_job(self, job): for schedule in yaml.load(job['spec']).get('schedule', []): if (re.match(self.cron_many, str(schedule.get('minute', '*'))) or re.match(self.cron_many, str(schedule.get('second', '*')))): await self._schedule_job('I', job, self.dayshift, **schedule) logger.debug('refresh ended OK') async def _update_scheduler(self): # self.dayshift = self.dayshift.add(days=1) logger.debug('Scheduling jobs for the next 24h') schedule_list = [] for job in self.catalog.get_all_jobs_repo(): for schedule in yaml.load(job['spec']).get('schedule', []): if (isinstance(schedule.get('minute', None), int) and isinstance(schedule.get('second', None), int)): schedule_list.append(self._schedule_job('D', job, self.dayshift, **schedule)) # schedule_list.append(self._schedule_interv_job(job, # **schedule)) if len(schedule_list) == 100: await asyncio.gather(*schedule_list) schedule_list = [] await asyncio.gather(*schedule_list) logger.debug('updating complete joblist') # openbm.major.jobview.load(self.catalog.list, self.dayshift, self.tz) logger.debug('refresh ended OK') async def _schedule_job(self, sch_type, job, 
    async def _schedule_job(self, sch_type, job, date_limit,
                            **triggerargs):
        schedule = datetime.now(tz=pytz.timezone(self.tz))
        triggerargs.setdefault('timezone', self.tz)
        # try:
        cron = CronTrigger(**triggerargs)
        # except ValueError as ex:
        #     raise ScheduleError(ex)
        while True:
            schedule = pendulum.instance(
                cron.get_next_fire_time(schedule, schedule))
            if schedule and schedule <= date_limit:
                # the inline f-string below is immediately superseded by
                # the create_jobid() call and kept only as dead code
                jobid = (f'{job["name"]}{sch_type}'
                         f'{pendulum.instance(schedule).day_of_year}'
                         f'{(self.dayshift - schedule).in_seconds():05}')
                jobid = self.create_jobid(job["name"], sch_type, schedule)
                self.create_job(jobid, sch_type, schedule, job)
                if sch_type == 'I':
                    break
            else:
                break

    def create_jobid(self, jobname, schedule_type, schedule_datetime):
        """Create a jobid string from a jobname, schedule type and
        schedule datetime."""
        timestamp = schedule_datetime or pendulum.now(self.tz)
        offset = (self.dayshift - timestamp).in_seconds()
        return (f'{jobname}{schedule_type}'
                f'{timestamp.day_of_year}{offset:05}')
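    # Worked example, using values visible in the sample data (the
    # 21:50:00 dayshift is inferred from those jobids, not configured
    # here):
    #     create_jobid('Prueba_plan_ya', 'I', <2019-04-15 19:26:00>)
    #     day_of_year = 105, offset = 21:50:00 - 19:26:00 = 8640 s
    #     -> 'Prueba_plan_ya' + 'I' + '105' + '08640'
    #     -> 'Prueba_plan_yaI10508640'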
    def create_job(self, jobid, sch_type, schedule, job):
        # NOTE Here we add our job to the scheduler using the TZ from
        # the scheduler because DateTrigger does not get it from main
        # tz = self.tz
        # schedule = pendulum.instance(schedule)
        # jobid = f'{job.id}{pendulum.now(self.tz).day_of_year}{id(job)}'
        # runid = f'{jobid}:0'
        est_exec_time = job['exec_time'][-1]
        # est_sta_time = schedule
        jobdict = yaml.load(job['spec'])
        schedule_date = schedule or pendulum.now(self.tz)
        dsn_date = dsndate_fmt(schedule_date)
        dsn_time = dsntime_fmt(schedule_date)
        default_provider = f'{job["name"]}.{dsn_date}'
        if sch_type in ('R', 'I'):
            default_provider += dsn_time
        if 'provides' not in jobdict:
            jobdict['provides'] = []
        jobdict['provides'].append(default_provider)
        jobdata = {'jobname': job['name'],
                   'jobid': jobid,
                   'prio': 0,
                   'owner': jobdict['owner'],
                   'est_sta_time': None,
                   'est_exec_time': est_exec_time,
                   'scheduler': jobdict.get('scheduler', self.schid),
                   'prereq': [],
                   'provides': jobdict['provides'],
                   'source': jobdict.get('steps'),
                   'runs': []}
        for prereq in jobdict.get('prereqs', []):
            if 'type' in prereq and prereq['type'] == 'user':
                jobdata['status'] = 'USR'
            dep_name = prereq['name'].replace('%SCHDATE', dsn_date) \
                                     .replace('%SCHTIME', dsn_time)
            jobdata['prereq'].append(dep_name)
        self.catalog.add_joblist(jobid, jobdata)
        openbm.major.jobview.insert_job(jobdata.copy(), self.dayshift,
                                        self.tz)
        if not jobdict.get('prereqs', []):
            self.create_run(jobdict, jobdata, schedule)
        return jobdata

    def create_run(self, jobdict, jobdata, schedule):
        # NOTE Here we add our job to the scheduler using the TZ from
        # the scheduler because DateTrigger does not get it from main
        jobid = jobdata['jobid']
        # runid = len(jobdata["runs"])
        jobdata['runs'].append({'sta_time': None,
                                'sto_time': None,
                                'node': jobdict.get('node', None),
                                'rc': None})
        self.add_job(self._subjob, jobstore='major', id=jobid,
                     args=(jobid, jobdata), misfire_grace_time=None,
                     trigger=DateTrigger(schedule, self.tz))
        est_sta_time = self.get_job(jobid, 'major').next_run_time
        # est_time_stop = est_sta_time + timedelta(est_time_exec)
        self.catalog.modify_joblist(jobid,
                                    {'est_sta_time': est_sta_time}, {})
        openbm.major.jobview.modify(jobid,
                                    {'est_sta_time': est_sta_time},
                                    jobdata['runs'][-1])

    def set_dependencies(self, job_name, sch_type, run_date, job):
        date_req = run_date or pendulum.now(self.tz)
        dsn_date = dsndate_fmt(date_req)
        dsn_time = dsntime_fmt(date_req)
        prov_name = f'{job_name}.{dsn_date}'
        if sch_type in ('R', 'I'):
            prov_name += f'.{dsn_time}'
        job['provider'] = prov_name
        if not self.dep_mgr.has_node(prov_name):
            self.dep_mgr.add_node(prov_name, jobid=job['jobid'],
                                  defined=True)
        else:
            self.dep_mgr.node[prov_name]['defined'] = True
            self.dep_mgr.node[prov_name]['jobid'] = job['jobid']
        for num, dep in enumerate(job['prereqs']):
            dep_name = dep['name'].replace('%SCHDATE', dsn_date) \
                                  .replace('%SCHTIME', dsn_time)
            job['prereqs'][num]['name'] = dep_name
            logger.debug(f'Dependency set to {dep_name} '
                         f'for job {job_name}')
            if ('cond' in dep and dep['cond']
                    and not self.dep_mgr.has_node(dep_name)):
                continue
            if not self.dep_mgr.has_node(dep_name):
                self.dep_mgr.add_node(dep_name, jobid=None, defined=False)
            self.dep_mgr.add_edge(dep_name, prov_name)

    async def _subjob(self, jobid, jobspec):
        self._manage_jobstatus(jobid, {'status': 'RSB'}, {})
        while True:
            try:
                node = self.node_mgr.select_node(
                    jobspec['runs'][-1]['node'])
                break
            except ValueError:
                self._manage_jobstatus(jobid, {'status': 'WSB'}, {})
                await self.node_mgr.condition.wait()
        self._manage_jobstatus(jobid, {'status': 'SBP'},
                               {'node': node['name']})
        if node['name'] == self.name:
            # to local node
            await self._execjob(jobid, jobspec)
        else:
            # submit job to node
            self.locks[jobid] = asyncio.Condition()
            await self.ws_clients[node['name']].send('hola')
            # self._manage_jobstatus(jobid, {'status': 'SUB'}, {})


class StandaloneMajorScheduler(MajorScheduler):

    def __init__(self, config):
        self.type = 'standalone'
        self.node_mgr = SimpleNodeStore(config)
        self.catalog = SimpleCatalog(config['minor']['data_dir'])
        super().__init__(config)


class ClusterMajorScheduler(MajorScheduler):

    def __init__(self, config):
        self.type = 'cluster'
        self.node = '{}:{}'.format(config['major']['cluster_addr'],
                                   config['major']['cluster_port'])
        self.node_mgr = RaftNodeStore(config)
        self.catalog = ClusterCatalog(config['minor']['data_dir'],
                                      self.node, config['cluster'])
        super().__init__(config)

    def _configure(self, config):
        self.node_mgr = RaftNodeStore(config)
        # cluster = RaftCatalog(node, config.pop('cluster'))
        logger.warning('Waiting for cluster to become ready')
        while not self.catalog.isReady():
            # asyncio.sleep() would only return an un-awaited coroutine
            # in this synchronous method; block with time.sleep instead
            time.sleep(1)
        # self.catalog.setaddr(self.node, config.pop('om_addr'),
        #                      config.pop('om_port'))
        super()._configure(config)
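The dependency manager used above is a plain networkx DiGraph whose
nodes are provider names and whose edges point from a prerequisite to
the provider that waits on it. A self-contained sketch of that wiring
(the names echo the sample data; the graph itself is illustrative, not
a dump):

import networkx as nx
from networkx.algorithms.dag import ancestors, descendants

dep_mgr = nx.DiGraph()
dep_mgr.add_node('Prueba1.D190415', jobid=None, defined=False)
dep_mgr.add_node('Prueba_plan_diaria.D190415',
                 jobid='Prueba_plan_diariaI10508640', defined=True)
dep_mgr.add_edge('Prueba1.D190415', 'Prueba_plan_diaria.D190415')

# everything that transitively waits on Prueba1.D190415:
assert descendants(dep_mgr, 'Prueba1.D190415') == {
    'Prueba_plan_diaria.D190415'}
# and the reverse direction:
assert ancestors(dep_mgr, 'Prueba_plan_diaria.D190415') == {
    'Prueba1.D190415'}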
==> openbm/major/catalogs.py <==

#!/usr/bin/env python
#
# Copyright (C) 2016 Oscar Curero
#
# This file is part of OpenBatchManager.
#
# OpenBatchManager free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenBatchManager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenBatchmanager. If not, see <http://www.gnu.org/licenses/>.
#

# from datetime import datetime
import logging
import os

from pydblite import Base
from pysyncobj import SyncObj, SyncObjConf
# from singleton_decorator import singleton

from openbm.major.exceptions import (NoSuchJob, NoSuchJobSet,
                                     DuplicateJob, DuplicateJobSet, )
# import openbm.major.jobview

logger = logging.getLogger(__name__)


class SimpleCatalog(object):

    def __init__(self, path):
        self.repo = Base(os.path.join(path, 'jobs.db'))
        self.list = Base(os.path.join(path, 'list.db'))
        self.jobsets = Base(os.path.join(path, 'jobsets.db'))
        if self.repo.exists():
            self.repo.open()
        else:
            self.repo.create('name', 'spec', 'exec_time')
            self.repo.create_index('name')
        if self.list.exists():
            self.list.open()
        else:
            self.list.create('id', 'jobdata')
            self.list.create_index('id')
        if self.jobsets.exists():
            self.jobsets.open()
        else:
            self.jobsets.create('name', 'enabled', 'starttime', 'endtime',
                                'timezone', 'totalslots', 'usedslots')
            self.jobsets.create_index('name')

    def add_job_repo(self, jobname, jobspec):
        if jobname not in self.repo._name:
            self.repo.insert(name=jobname, spec=jobspec,
                             exec_time=[0, 0, 0, 0])
            self.repo.commit()
        else:
            raise DuplicateJob(f'jobname {jobname} already exists')

    def modify_job_repo(self, jobname, jobspec):
        record = self.repo._name[jobname]
        if record:
            # the repo field is named 'spec'; updating 'jobspec' would
            # add a stray column instead of replacing the spec
            self.repo.update(record, spec=jobspec)
            self.repo.commit()
        else:
            raise NoSuchJob(f'no such jobname: {jobname}')
        logger.debug(f'job {jobname} modified')

    def delete_job_repo(self, jobname):
        record = self.repo._name[jobname]
        if record:
            self.repo.delete(record)
            self.repo.commit()
        else:
            raise NoSuchJob(f'no such jobname: {jobname}')
        logger.debug(f'job {jobname} removed')

    def get_job_repo(self, jobname):
        record = self.repo._name[jobname]
        if record:
            return record[0]
        else:
            raise NoSuchJob(f'no such jobname: {jobname}')

    def get_all_jobs_repo(self):
        return self.repo

    def add_joblist(self, jobid, jobdict):
        assert jobid not in self.list._id
        self.list.insert(id=jobid, jobdata=jobdict)
        self.list.commit()

    def modify_joblist(self, jobid, jobdata, rundata):
        jobdict = self.list._id[jobid][0]['jobdata']
        jobdict.update(jobdata)
        if rundata is not None:
            jobdict['runs'][-1].update(rundata)
        record = self.list._id[jobid][0]
        self.list.update(record, jobdata=jobdict)
        self.list.commit()
        # openbm.major.jobview.modify(jobid, runid, jobdata, rundata)

    def delete_joblist(self, jobid):
        record = self.list._id[jobid]
        self.list.delete(record)
        self.list.commit()

    def getjob_joblist(self, jobid):
        return self.list._id[jobid][0]

    def add_jobset(self, name, starttime, endtime, totalslots, timezone):
        if name not in self.jobsets._name:
            self.jobsets.insert(name=name, enabled=True,
                                starttime=starttime, endtime=endtime,
                                timezone=timezone, totalslots=totalslots,
                                usedslots=0)
            self.jobsets.commit()
            logger.debug(f'job set {name} added')
        else:
            raise DuplicateJobSet(f'job set {name} already exists')

    def get_jobset(self, name):
        record = self.jobsets._name[name]
        if record:
            return record[0]
        else:
            raise NoSuchJobSet(f'no such job set: {name}')

    def get_jobsets(self, jobsets=None):
        if jobsets:
            return [jobset for jobset in self.jobsets._name
                    if jobset['name'] in jobsets]
        else:
            return self.jobsets

    def modify_jobset(self, name, jobsetdata):
        jobset = self.jobsets._name[name]
        if not jobset:
            raise NoSuchJobSet(f'no such job set: {name}')
        self.jobsets.update(jobset, **jobsetdata)
        self.jobsets.commit()

    def delete_jobset(self, name):
        record = self.jobsets._name[name]
        if record:
            self.jobsets.delete(record)
            self.jobsets.commit()
        else:
            raise NoSuchJobSet(f'no such job set: {name}')
        logger.debug(f'job set {name} removed')
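# Example usage (a sketch; the path is hypothetical, the methods and
# record shapes are the ones defined above):
#     catalog = SimpleCatalog('/var/lib/openbm/data')
#     catalog.add_job_repo('Prueba_plan_ya', yaml_spec_string)
#     job = catalog.get_job_repo('Prueba_plan_ya')   # record dict
#     catalog.add_joblist('Prueba_plan_yaI10508640', jobdata_dict)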
class ClusterCatalog(SyncObj):

    def __init__(self, path, node, nodes):
        logger.debug([_[1] for _ in nodes])
        logger.debug(node)
        conf = SyncObjConf(dynamicMembershipChange=True,
                           fullDumpFile=os.path.join(path, 'dump'),
                           journalFile=os.path.join(path, 'journal'))
        SyncObj.__init__(self, node, [_[1] for _ in nodes], conf)
        self._offset = 0
        self._joboffsets = {}
        self._websrv = {}

    def get_jobsets(self, jobsets=None):
        return []

==> openbm/major/dist_logging.py <==

from logging import handlers


class PipeHandler(handlers.QueueHandler):

    def __init__(self, pipe):
        super().__init__(pipe)

    def enqueue(self, record):
        return self.queue.send(record)


class PipeListener(handlers.QueueListener):

    # def __init__(self, pipe, *handlers, respect_handler_level=False):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def dequeue(self, _):
        return self.queue.recv()

==> openbm/major/exceptions.py <==

import sys


class InternalSchedulerError(Exception):

    def __init__(self, ee):
        self.ee = ee
        __, __, self.tb = sys.exc_info()

    def re_raise(self):
        raise self.ee.with_traceback(self.tb)


class SchedulerError(BaseException):
    pass


class ScheduleError(SchedulerError):
    pass


class JobError(SchedulerError):
    pass


class NoSuchSchedule(ScheduleError):
    pass


class JobStoreError(SchedulerError):
    pass


class NoSuchJob(JobStoreError):
    pass


class DuplicateJob(JobStoreError):
    pass


class NoSuchJobSet(JobStoreError):
    pass


class DuplicateJobSet(JobStoreError):
    pass


class NoSuchNode(JobStoreError):
    pass


class DuplicateNode(JobStoreError):
    pass


class TriggerError(SchedulerError):
    pass
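The PipeHandler/PipeListener pair above adapts the stdlib queue-based
logging helpers to a pipe: only send/recv replace put/get. A minimal
wiring sketch, assuming a plain multiprocessing.Pipe whose child end
feeds records back to the parent (the scheduler itself uses AioPipe):

import logging
import multiprocessing

from openbm.major.dist_logging import PipeHandler, PipeListener

recv_end, send_end = multiprocessing.Pipe(duplex=False)
listener = PipeListener(recv_end, logging.StreamHandler())
listener.start()  # drains records on a background thread

child_logger = logging.getLogger('om')
child_logger.addHandler(PipeHandler(send_end))
child_logger.warning('hello from the child process')
# shutdown is elided here: QueueListener.stop() assumes a real queue
# with put_nowait(), which a pipe connection does not provide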
==> openbm/major/jobview.py <==

from datetime import timedelta

import networkx as nx
# from networkx.algorithms.dag import ancestors, descendants
from sqlalchemy import (create_engine, Column, ForeignKey, select,
                        DateTime, Unicode, Boolean, CHAR, Integer,
                        PickleType)
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship, sessionmaker, scoped_session
from sqlalchemy.orm.exc import FlushError, NoResultFound
from sqlalchemy.pool import StaticPool
from sqlalchemy_querybuilder import Filter
import pendulum

# from openbm.major.utils import decode_jobid, dsndate_fmt, dsntime_fmt

Base = declarative_base()


class Job(Base):
    __tablename__ = 'jobs'
    jobid = Column('jobid', Unicode(16), primary_key=True, nullable=False)
    scheduler = Column('sched', CHAR(1), nullable=False)
    status = Column('status', Unicode(3), index=True)
    jobname = Column('jobname', Unicode(16))
    est_sta_time = Column('est_sta_time', DateTime)
    est_exec_time = Column('est_exec_time', Integer)
    prio = Column('prio', Integer)
    wait = Column('wait', Unicode(16))
    owner = Column('owner', Unicode(64))
    steps = Column('steps', PickleType)
    provides = relationship('Provider', lazy='subquery')
    runs = relationship('JobRun', lazy='subquery',
                        cascade='save-update, merge, delete')

    @hybrid_property
    def runid(self):
        return self.prio

    @hybrid_property
    def rc(self):
        return self.runs[0].rc if self.runs else None

    @hybrid_property
    def node(self):
        return self.runs[0].node if self.runs else None

    @node.expression
    def node(cls):
        return select([JobRun.node]).where(
            cls.jobid == JobRun.jobid).as_scalar()

    @hybrid_property
    def sta_time(self):
        return self.runs[0].sta_time if self.runs else None

    @hybrid_property
    def sto_time(self):
        return self.runs[0].sto_time if self.runs else None

    @hybrid_property
    def runtime(self):
        return self.runs[0].runtime if self.runs else 0

    def __json__(self, request):
        tz = request.registry.settings['timezone']
        return {'jobid': self.jobid,
                'scheduler': self.scheduler,
                'jobname': self.jobname,
                'prio': self.prio,
                'wait': self.wait,
                'owner': self.owner,
                'node': self.node,
                'runid': self.runid,
                'status': self.status,
                'rc': self.rc,
                'est_exec_time': self.est_exec_time or 0,
                'est_sta_time': pendulum.instance(self.est_sta_time,
                                                  tz).isoformat()
                if self.est_sta_time else None,
                'sta_time': pendulum.instance(self.sta_time,
                                              tz).isoformat()
                if self.sta_time else None,
                'est_sto_time': (pendulum.instance(self.est_sta_time, tz)
                                 + timedelta(self.est_exec_time)
                                 ).isoformat()
                if self.est_sta_time else None,
                'sto_time': pendulum.instance(self.sto_time,
                                              tz).isoformat()
                if self.sto_time else None,
                'runtime': self.runtime,
                'runs': self.runs,
                # 'prereq': self.prereq,
                # 'provider': self.provider,
                }


class JobRun(Base):
    __tablename__ = 'run'
    jobid = Column('jobid', Unicode(16), ForeignKey('jobs.jobid'),
                   primary_key=True)
    runid = Column('runid', Integer)
    # status = Column('status', Unicode(3), nullable=False, index=True)
    node = Column('node', Unicode(64))
    rc = Column('rc', CHAR(6))
    # est_sta_time = Column('est_sta_time', DateTime)
    sta_time = Column('sta_time', DateTime, index=True)
    # est_sto_time = Column('est_sto_time', DateTime)
    sto_time = Column('sto_time', DateTime, index=True)

    @hybrid_property
    def runtime(self):
        if not self.sto_time or not self.sta_time:
            return None
        else:
            return (self.sto_time - self.sta_time).seconds / 60

    def __json__(self, request):
        tz = request.registry.settings['timezone']
        return {'jobid': self.jobid,
                'runid': self.runid,
                'node': self.node,
                'rc': self.rc,
                'sta_time': pendulum.instance(self.sta_time,
                                              tz).isoformat()
                if self.sta_time else None,
                # 'est_sto_time': self.est_sto_time.isoformat() if
                # self.est_sto_time else None,
                # 'sto_time': self.sto_time.isoformat() if self.sto_time
                # else None,
                }


class Provider(Base):
    __tablename__ = 'provider'
    name = Column('name', Unicode(512), primary_key=True)
    jobid = Column('jobid', Unicode(16), ForeignKey('jobs.jobid'))

    def __json__(self, request):
        return {}


class PreReq(Base):
    __tablename__ = 'prereq'
    jobid = Column('jobid', Unicode(16), ForeignKey('jobs.jobid'),
                   primary_key=True)
    name = Column('name', Unicode(512), primary_key=True)
    cond = Column('cond', Boolean)
    status = Column('status', Boolean)

    def __json__(self, request):
        return {}


engine = create_engine('sqlite://',
                       connect_args={'check_same_thread': False},
                       poolclass=StaticPool, echo=False)
Base.metadata.create_all(engine)
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)
query = Session.query()
jobfilter = Filter({'jobs': Job}, query)
digraph = nx.Graph()


def load(joblist, dayshift, tz):
    for job in joblist:
        insert_job(job['jobdata'].copy(), dayshift, tz)
    # print(digraph.nodes())


def insert_job(jobdata, dayshift, tz):
    # schedule = decode_jobid(jobdata['jobid'], dayshift, tz)
    jobid = jobdata['jobid']
    jobdata.pop('source')
    for runid, rundata in enumerate(jobdata['runs']):
        Session.add(JobRun(runid=runid, jobid=jobid, **rundata))
    # TODO: for provider in jobdata['provider']:
    provider_name = jobdata['provides'][-1]
    provider = Session.query(Provider).filter(
        Provider.name == provider_name).first()
    if provider:
        provider.jobid = jobdata['jobid']
    else:
        provider = Provider(name=provider_name, jobid=jobdata['jobid'])
    digraph.add_node(provider)
    # END OF TODO
    del jobdata['runs']
    del jobdata['provides']
    del jobdata['prereq']
    job = Job(**jobdata)
    Session.add(job)
    Session.commit()
def insert_run(jobid, rundata):
    runid = Session.query(JobRun).filter(JobRun.jobid == jobid).count()
    Session.add(JobRun(jobid=jobid, runid=runid, ))
    try:
        Session.commit()
    except (IntegrityError, FlushError):
        Session.rollback()


def modify(jobid, jobdata, rundata):
    job = Session.query(Job).filter(Job.jobid == jobid).one()
    for key in jobdata:
        setattr(job, key, jobdata[key])
    try:
        # FIXME won't work when there's more than one run!
        run = Session.query(JobRun).filter(JobRun.jobid == jobid).one()
        for key in rundata:
            setattr(run, key, rundata[key])
    except NoResultFound:
        insert_run(jobid, rundata.copy())
    Session.commit()


def filter(filters):
    if 'condition' not in filters:
        filters['condition'] = 'AND'
    if not filters['rules']:
        # If no filters are passed, force at least one so there's a query
        filters['rules'].append({'field': 'jobs.jobname',
                                 'operator': 'contains',
                                 'value': ''})
    return jobfilter.querybuilder(filters).all()
    # return hola
==> openbm/major/nodestores.py <==

import os
import asyncio
# import logging

import pydblite
# from singleton_decorator import singleton


class BaseNodeStore(object):

    def expand_group(self, group):
        if group is None:
            return [node['name'] for node in self.get_nodes()]
        elif not group.startswith('@'):
            return [group]
        return [node['node'] for node in self.groups(name=group)
                if node['node'] in self.get_nodes()]

    def select_node(self, node):
        nodes = self.expand_group(node)
        if len(nodes) == 1:
            try:
                node = self.nodes._name[nodes[0]][0]
            except IndexError:
                raise ValueError('node not active')
            if node['freeinits'] == 0:
                raise ValueError('no free inits')
            else:
                return node
        # pick the candidate with the most free inits; max() raises
        # ValueError on an empty sequence, which _subjob treats as
        # "wait and retry" (the comprehension must test each node
        # record, not the table object itself)
        return max([node for node in self.nodes
                    if node['name'] in nodes and node['freeinits'] > 0],
                   key=lambda node: node['freeinits'])


class SimpleNodeStore(BaseNodeStore):
    modified = False

    def __init__(self, config):
        self.condition = asyncio.Condition()
        asyncio.get_event_loop().create_task(self.condition.acquire())
        dbgroups_path = os.path.join(config['minor']['data_dir'],
                                     'gnodes.db')
        dbnodes_path = ':memory:'
        self.nodes = pydblite.Base(dbnodes_path)
        self.nodes.create('name', 'status', 'cpu', 'totalinits',
                          'freeinits', 'lastseen')
        self.nodes.create_index('name')
        self.groups = pydblite.Base(dbgroups_path)
        if self.groups.exists():
            self.groups.open()
        else:
            self.groups.create('name', 'node')
            self.groups.create_index('name')
        self.add_node(config['minor']['node_name'],
                      config['minor']['inits'])
        if int(config['minor']['inits']) > 0:
            self.modify_node(config['minor']['node_name'], status='ACT')

    # async def create_lock(self):
    #     await self.condition.acquire()

    # async def send_update(self):
    #     print('SENDING NOTIFY ')
    #     self.modified = True
    #     self.condition.notify_all()
    #     await self.condition.acquire()

    def add_node(self, name, inits):
        if (name in self.nodes._name
                and self.nodes._name[name][0]['status'] != 'LST'):
            raise KeyError()
        # if value not in ('UNK', 'LST', 'CLS', 'FUL', 'ACT'):
        #     raise ValueError(f'status {value} not valid for node '
        #                      f'{self.name}')
        self.nodes.delete(self.nodes._name[name])
        status = 'ACT' if int(inits) > 0 else 'CLS'
        self.nodes.insert(name=name, status=status, cpu=100,
                          totalinits=int(inits), freeinits=int(inits))
        # asyncio.get_event_loop().create_task(self.send_update())

    def modify_node(self, name, **values):
        record = self.nodes._name[name]
        if not record:
            raise IndexError
        self.nodes.update(record, **values)
        # asyncio.get_event_loop().create_task(self.send_update())

    def delete_node(self, name):
        self.nodes.delete(self.nodes(name=name))
        self.modified = True

    def get_node(self, **conditions):
        return self.nodes(**conditions)

    def get_nodes(self):
        return self.nodes

    def get_group(self, group):
        return self.groups(name=group)

    def add_group(self, group):
        pass

    def delete_group(self, group):
        pass

    def delete_node2group(self, group, name):
        pass
        # record = self.groups(group=group, name=name)
        # del self.groups[record]
        # self.groups.commit()
        # self.modified = True

    def add_node2group(self, group, name):
        self.groups.insert(name=group, node=name)
        self.groups.commit()
        # asyncio.get_event_loop().run_until_complete(self.send_update())

    async def clear_nodes(self):
        pass


class RaftNodeStore(BaseNodeStore):

    def __init__(self, config):
        pass

==> openbm/major/oscar.py <==

"""a directed graph example."""
from sqlalchemy import Column, Integer, ForeignKey, create_engine
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Node(Base):
    __tablename__ = 'node'
    node_id = Column(Integer, primary_key=True)

    def higher_neighbors(self):
        return [x.higher_node for x in self.lower_edges]

    def lower_neighbors(self):
        return [x.lower_node for x in self.higher_edges]


class Edge(Base):
    __tablename__ = 'edge'
    lower_id = Column(Integer, ForeignKey('node.node_id'),
                      primary_key=True)
    higher_id = Column(Integer, ForeignKey('node.node_id'),
                       primary_key=True)
    lower_node = relationship(Node,
                              primaryjoin=lower_id == Node.node_id,
                              backref='lower_edges')
    higher_node = relationship(Node,
                               primaryjoin=higher_id == Node.node_id,
                               backref='higher_edges')

    def __init__(self, n1, n2):
        self.lower_node = n1
        self.higher_node = n2


engine = create_engine('sqlite:///oskitar', echo=True)
Base.metadata.create_all(engine)
session = sessionmaker(engine)()

# create a directed graph like this:
#   n1 -> n2 -> n1
#            -> n5
#            -> n7
#      -> n3 -> n6

n1 = Node()
n2 = Node()
n3 = Node()
n4 = Node()
n5 = Node()
n6 = Node()
n7 = Node()

Edge(n1, n2)
Edge(n1, n3)
Edge(n2, n1)
Edge(n2, n5)
Edge(n2, n7)
Edge(n3, n6)

session.add_all([n1, n2, n3, n4, n5, n6, n7])
session.commit()

assert [x for x in n3.higher_neighbors()] == [n6]
assert [x for x in n3.lower_neighbors()] == [n1]
assert [x for x in n2.lower_neighbors()] == [n1]
assert [x for x in n2.higher_neighbors()] == [n1, n5, n7]
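The Node/Edge adjacency pattern above only answers one-hop questions;
transitive reachability needs a walk. A small sketch on top of it (the
helper is mine, not part of oscar.py):

def reachable(node):
    """All nodes reachable from `node` by following edges upward."""
    seen, stack = set(), [node]
    while stack:
        for neighbor in stack.pop().higher_neighbors():
            if neighbor not in seen:
                seen.add(neighbor)
                stack.append(neighbor)
    return seen

# e.g. reachable(n3) == {n6}, while reachable(n1) includes n2, n5, n7
# and n1 itself (via the n1 -> n2 -> n1 cycle).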
==> openbm/major/oskitar (SQLite 3 database; binary pages omitted) <==

The database written by oscar.py above; only the schema is recoverable
from the dump:

CREATE TABLE node (
    node_id INTEGER NOT NULL,
    PRIMARY KEY (node_id)
)

CREATE TABLE edge (
    lower_id INTEGER NOT NULL,
    higher_id INTEGER NOT NULL,
    PRIMARY KEY (lower_id, higher_id),
    FOREIGN KEY(lower_id) REFERENCES node (node_id),
    FOREIGN KEY(higher_id) REFERENCES node (node_id)
)

==> openbm/major/triggers.py <==

import asyncio
import logging

from apscheduler.triggers.base import BaseTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.combining import BaseCombiningTrigger
import pendulum

from openbm.major.nodestores import (RaftNodeStore, SimpleNodeStore)
# from openbm.major.catalogs import (SimpleCatalog)

logger = logging.getLogger(__name__)


class OBMTrigger(BaseCombiningTrigger):

    def __init__(self, tipology, node_name, job, schedule, tz):
        # triggers = [NodeTrigger(tipology, node_name, tz)]
        triggers = []
        if schedule:
            triggers.append(ScheduleTrigger(run_date=schedule,
                                            timezone=tz))
        # if jobdata['prepreq']:
        #     triggers.append(DepTrigger(jobdata['prereq']))
        super().__init__(triggers)

    def get_next_fire_time(self, previous_fire_time, now):
        fire_times = [trigger.get_next_fire_time(previous_fire_time, now)
                      for trigger in self.triggers]
        return max(fire_times)


class ScheduleTrigger(DateTrigger):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def get_next_fire_time(self, previous_fire_time, now):
        fire_time = super().get_next_fire_time(previous_fire_time, now)
        if fire_time is not None:
            return fire_time
        else:
            return now


class NodeTrigger(BaseTrigger):

    def __init__(self, tipology, node, tz):
        if tipology == 'standalone':
            node_mgr = SimpleNodeStore()
        else:
            node_mgr = RaftNodeStore()
        self.ready = False
        if not node.startswith('@') and node not in node_mgr.get_nodes():
            self.tz = tz
            asyncio.get_event_loop().create_task(
                self.poll(node_mgr.condition))
        else:
            self.ready = True

    async def poll(self, node_ready):
        await node_ready.wait()
        self.ready = True

    def get_next_fire_time(self, previous_fire_time, now):
        if self.ready:
            return now
        else:
            return pendulum.instance(now).in_timezone(self.tz).add(
                minutes=1, seconds=2)

==> openbm/major/utils.py <==

# import pendulum


# def decode_jobid(jobid, dayshift, tz):
#     """Create a datetime instance along with the job name and schedule
#     type from a jobid string"""
#     offset = int(jobid[-5:])
#     juliandate = jobid[-8:-5]
#     schedule_type = jobid[-9]
#     job_name = jobid[:-10]
#     timestamp = pendulum.from_format(juliandate, 'DDDD', tz=tz)
#     if pendulum.now().format('DDDD') < juliandate:
#         timestamp = timestamp.subtract(years=1)
#     timestamp = timestamp.set(hour=dayshift.hour,
#                               minute=dayshift.minute,
#                               second=dayshift.second)
#     timestamp = timestamp.add(seconds=offset)
#     return (timestamp, job_name, schedule_type)


def dsndate_fmt(datetime):
    """Convert a datetime object to a DSN date format.

    Given datetime(2017, 11, 3, 9) this returns the string 'D171103'.
    """
    return f"D{datetime.strftime('%y%m%d')}"


def dsntime_fmt(datetime):
    """Convert a datetime object to a DSN time format.

    Given datetime(2017, 11, 3, 11, 0, 20) this returns 'T110020'.
    """
    return f'T{datetime.strftime("%H%M%S")}'

==> openbm/major/webapps/__init__.py (empty) <==
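These DSN stamps are what provider names in the job list are built from
(e.g. 'Prueba_plan_diaria.D190415T172600'). A quick sanity check, using
the stdlib datetime in place of a pendulum instance:

from datetime import datetime

dt = datetime(2019, 4, 15, 17, 26, 0)
assert dsndate_fmt(dt) == 'D190415'
assert dsntime_fmt(dt) == 'T172600'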
==> openbm/major/webapps/resources.py <==

#!/usr/bin/env python
#
# Copyright (C) 2015 social2data S.A.
#
# This file is part of OpenBatchManager.
#
# OpenBatchManager free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenBatchManager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenBatchmanager. If not, see <http://www.gnu.org/licenses/>.
#

# from pyramid.security import Deny
from pyramid.security import Allow, Deny
# from pyramid.security import Everyone
from pyramid.security import Authenticated
from pyramid.security import ALL_PERMISSIONS
from pyramid.httpexceptions import (HTTPBadRequest, HTTPConflict,
                                    # HTTPCreated,
                                    HTTPForbidden, HTTPNotFound,
                                    HTTPNotImplemented, HTTPOk)
import json
# import jsonschema
# from jsonschema import validate
import yaml
import schema

# from plugins.auth import SecurityException
from openbm.plugins import auth
from openbm.major import exceptions
from openbm.minor import schemas


class Root(object):
    __name__ = ''
    __parent__ = None

    def __init__(self, request):
        self.request = request

    def __getitem__(self, key):
        if key == 'schedule':
            return ScheduleList(self, self.request)
        elif key == 'users':
            return UserList(self, self.request)
        elif key == 'groups':
            return Group(self, self.request, [], key)
        elif key == 'jobs':
            return JobList(self, self.request)
        elif key == 'nodes':
            return NodeList(self, self.request)
        elif key == 'jobsets':
            return JobSetList(self, self.request)
        elif key == 'calendars':
            return Calendars()
        elif key == 'cluster':
            return Cluster()
        raise KeyError


class Cluster(object):
    __name__ = 'cluster'
    __parent__ = Root

    def __init__(self):
        pass


class UserList(object):

    def __init__(self, parent, request):
        self.__name__ = 'users'
        self.__parent__ = parent
        self.request = request
        self.scheduler = request.scheduler

    def __getitem__(self, key):
        return User(self, self.request, key)

    def get_list(self):
        return self.request.registry.auth.get_all_users()

    @property
    def __acl__(self):
        return [(Allow, 'root', ALL_PERMISSIONS),
                (Allow, 'admin', ALL_PERMISSIONS),
                (Allow, Authenticated, 'view_user'),
                ]


class User(object):

    def __init__(self, parent, request, userid):
        self.__name__ = userid
        self.__parent__ = parent
        self.request = request
        self.scheduler = request.scheduler
        self.userid = userid

    def get_user(self):
        try:
            return self.request.registry.auth.get_user(self.userid)
        except auth.NoSuchUser as e:
            raise HTTPNotFound(e)

    def add_user(self, password, **details):
        try:
            return self.request.registry.auth.create_user(self.userid,
                                                          password,
                                                          **details)
        except auth.DuplicateUser as e:
            raise HTTPConflict(e)
        except auth.NotImplemented as e:
            raise HTTPNotImplemented(e)

    def delete_user(self):
        try:
            self.request.registry.auth.delete_user(self.userid)
        except auth.NotImplemented as e:
            raise HTTPNotImplemented(e)

    @property
    def __acl__(self):
        acl = [(Allow, 'root', ALL_PERMISSIONS),
               (Allow, 'admin', ALL_PERMISSIONS),
               (Allow, self.userid, 'modify_user'),
               (Allow, self.userid, 'view_user'),
               ]
        if (any(group for group
                in self.request.registry.auth.user_groups(self.userid)
                if f'admin-{group}'
                in self.request.effective_principals[2:])):
            acl.append((Allow, self.request.authenticated_userid,
                        ALL_PERMISSIONS))
        return acl + [(Deny, Authenticated, ALL_PERMISSIONS)]
= key self.__parent__ = parent self.request = request self.scheduler = request.scheduler if parent.__name__: self.group = group + [key] else: self.group = group def __getitem__(self, key): return Group(self, self.request, self.group, key) def create_group(self, desc): try: self.request.registry.auth.create_group(self.group, desc) except auth.NoSuchGroup as e: raise HTTPNotFound(e) except auth.DuplicateGroup as e: raise HTTPConflict(e) except auth.NotImplemented as e: raise HTTPNotImplemented(e) def get_group(self): try: return self.request.registry.auth.get_group(self.group) except auth.NoSuchGroup as e: raise HTTPNotFound(e) def delete_group(self): try: return self.request.registry.auth.delete_group(self.group) except auth.NoSuchGroup as e: raise HTTPNotFound(e) except auth.DeleteGroupChildren as e: raise HTTPConflict(e) except auth.NotImplemented as e: raise HTTPNotImplemented(e) @property def __acl__(self): return [(Allow, 'u:root', ALL_PERMISSIONS), (Allow, 'g:admin', ALL_PERMISSIONS), (Allow, f'g:admin-{self.__name__}', ALL_PERMISSIONS), (Allow, Authenticated, 'view_group') ] class NodeGroups(object): __name__ = 'nodegroups' __parent__ = Root def __init__(self): """ TODO """ class Calendars(object): __name__ = 'calendars' __parent__ = Root def __init__(self): pass class JobSetList(object): def __init__(self, parent, request): self.__name__ = 'jobsets' self.__parent__ = parent self.request = request self.scheduler = request.scheduler def __getitem__(self, key): return JobSet(self, self.request, key) def get_list(self): raise HTTPOk([dict(node) for node in self.scheduler('catalog.get_jobsets')]) @property def __acl__(self): return [(Allow, 'u:root', ALL_PERMISSIONS), (Allow, 'r:admin', ALL_PERMISSIONS), (Allow, 'r:configurator', 'config_jobset'), (Allow, Authenticated, 'view_jobset') ] class JobSet(object): def __init__(self, parent, request, jobsetname): self.__name__ = jobsetname self.__parent__ = parent self.request = request self.scheduler = request.scheduler self.jobset = jobsetname def add_jobset(self, starttime, endtime, totalslots, timezone): try: self.scheduler('catalog.add_jobset', self.jobset, starttime, endtime, totalslots, timezone) except exceptions.DuplicateJobSet: raise HTTPConflict(json_body={'status': 'error', 'code': 409, 'message': f'job set <{self.jobset}>' ' already exists'}) from None def get_jobset(self): raise HTTPOk([dict(self.scheduler('catalog.get_jobset', self.jobset))]) def modify_jobset(self, jobsetdata): try: self.scheduler('catalog.get_jobset', self.jobset) except exceptions.NoSuchJobSet: raise HTTPNotFound(f'job set <{self.jobset}> does not exists') self.scheduler('catalog.modify_jobset', self.jobset, jobsetdata) def delete_jobset(self): try: self.scheduler('catalog.delete_jobset', self.jobset) except exceptions.NoSuchJobSet: raise HTTPNotFound() @property def __acl__(self): return [(Allow, 'root', ALL_PERMISSIONS), (Allow, 'r:admin', ALL_PERMISSIONS), (Allow, f'r:admin-{self.jobset}', ALL_PERMISSIONS), (Allow, 'r:configurator', 'config_jobset'), (Allow, f'r:configurator-{self.jobset}', 'config_jobset'), (Allow, 'r:operator', 'oper_jobset'), (Allow, f'r:operator_{self.jobset}', 'oper_jobset'), ] class NodeList(object): def __init__(self, parent, request): self.__name__ = 'nodes' self.__parent__ = parent self.request = request self.scheduler = request.scheduler def __getitem__(self, key): return Node(self, self.request, key) def get_list(self): return self.scheduler('node_mgr.get_nodes') @property def __acl__(self): return [(Allow, Authenticated, 
'view_node'), ] class Node(object): def __init__(self, parent, request, nodename): self.__name__ = nodename self.__parent__ = parent self.request = request self.scheduler = request.scheduler self.node = nodename # self.job = self.scheduler('get_job', jobname) def modify_node(self, inits): try: self.scheduler('node_mgr.modify_node', self.node, inits=inits) except IndexError: raise HTTPNotFound() @property def __acl__(self): perm = [(Allow, 'root', 'oper_node'), (Allow, 'opernode', 'oper_node'), (Allow, f'opernode_{self.node}', 'oper_node') ] for group in self.request.effective_principals[2:]: if (group != 'opernode' and group != f'opernode_{self.node}' and group.startswith('opernode')): perm.append((Allow, group, 'oper_node')) return perm class ScheduleList(object): def __init__(self, parent, request): self.__name__ = 'schedules' self.__parent__ = parent self.request = request self.scheduler = request.scheduler def __getitem__(self, key): return Schedule(self, self.request, key) # else: # raise KeyError @property def __acl__(self): return [(Allow, 'r:operator', ('view_job', 'sched_job', 'start_job', 'stop_job', 'comment_job')), (Allow, 'r:configurator', ('add_job', 'delete_job', 'view_job', 'edit_job', 'comment_job')), (Allow, Authenticated, ('view_schedule')), ] def get_list(self, query): try: query = json.loads(query) except Exception as e: raise HTTPBadRequest('Malformed query string: ' + e.__str__()) return self.scheduler('query_jobview', query) class Schedule(object): def __init__(self, parent, request, jobid): self.__name__ = jobid self.__parent__ = parent self.request = request self.scheduler = request.scheduler try: # self.job = self.scheduler('catalog.getjob_joblist', # jobid)['jobdata'] self.job = self.scheduler('query_jobview', {'rules': [{'field': 'jobs.jobid', 'operator': 'equal', 'value': self.__name__}]})[0] except IndexError: raise HTTPNotFound() from None def abort(self): if self.job['status'] != 'EXE': raise HTTPBadRequest('Job is not in EXE status') return self.scheduler('abort_job', self.__name__) def details(self): job = self.scheduler('query_jobview', {'rules': [{'field': 'jobs.jobid', 'operator': 'equal', 'value': self.__name__}]})[0] return job def delete_schedule(self): return self.scheduler('delete_schedule', self.__name__) def refresh_schedule(self): try: self.scheduler('refresh_schedule', self.__name__) except exceptions.NoSuchSchedule as e: raise HTTPNotFound(e) def source(self): return self.scheduler('catalog.getjob_joblist', self.__name__) @property def __acl__(self): return [(Allow, 'r:operator', ('view_job', 'sched_job', 'start_job', 'stop_job', 'comment_job')), (Allow, 'r:configurator', ('add_job', 'delete_job', 'view_job', 'edit_job', 'comment_job')), (Allow, Authenticated, ('view_job')), ] class JobList(object): def __init__(self, parent, request): self.__name__ = 'jobs' self.__parent__ = parent self.request = request self.scheduler = request.scheduler def __getitem__(self, key): return Job(self, self.request, key) def add_job(self, jobspec, replace=False): try: jobdict = yaml.load(jobspec) schemas.jobschema.validate(jobdict) except (yaml.YAMLError, schema.SchemaError) as e: raise HTTPBadRequest(e) if jobdict['owner'] not in self.request.effective_principals[2:]: raise HTTPForbidden(f'owner not authorized for job') try: self.scheduler('catalog.add_job_repo', jobdict['name'], jobspec) except exceptions.DuplicateJob as exc: if replace: self.delete_job(jobdict['name']) self.scheduler('catalog.add_job_repo', jobdict['name'], jobspec) else: raise 
HTTPConflict(exceptions.DuplicateJob(exc)) # FIXME falta la invocacion final!!! # raise HTTPCreated('job added') def validate_job(self, jobspec): try: jobdict = yaml.load(jobspec) schemas.jobschema.validate(jobdict) except (yaml.YAMLError, schema.SchemaError) as e: raise HTTPBadRequest(e) # raise HTTPOk('ok') def delete_job(self, jobname): try: self.scheduler('catalog.delete_job_repo', jobname) except exceptions.NoSuchJob as e: raise HTTPNotFound(e) def get_jobs(self): return [job['name'] for job in self.scheduler('catalog.get_all_jobs_repo')] @property def __acl__(self): return [(Allow, 'u:root', ALL_PERMISSIONS), (Allow, 'r:admin', ALL_PERMISSIONS), (Allow, 'r:configurator', 'config_jobs'), ] class Job(object): def __init__(self, parent, request, jobname): self.__name__ = jobname self.__parent__ = parent self.request = request self.scheduler = request.scheduler def get_job(self): try: return self.scheduler('catalog.get_job_repo', self.__name__) except exceptions.NoSuchJob as e: raise HTTPNotFound(e) def add_schedule(self, when): try: # FIXME timeargs['now'] = '1' if timeargs['now'] else None return self.scheduler('schedule_job', self.__name__, when) except exceptions.NoSuchJob as e: raise HTTPNotFound(e) except exceptions.ScheduleError as e: raise HTTPBadRequest(e) def delete_job(self): try: return self.scheduler('catalog.delete_job_repo', self.__name__) except exceptions.NoSuchJob as e: raise HTTPNotFound(e) @property def __acl__(self): return [(Allow, 'u:root', ALL_PERMISSIONS), (Allow, 'r:admin', ALL_PERMISSIONS), (Allow, 'r:monitor', 'view_jobs'), (Allow, 'r:monitor-{self.jobname}', 'view_jobs'), (Allow, 'r:configurator', 'config_jobs'), (Allow, 'r:configurator-{self.jobname}', 'config_jobs'), (Allow, 'r:operator', 'oper_jobs'), (Allow, 'r:operator-{self.jobname}', 'oper_jobs'), ] PK!f##openbm/major/webapps/webapi.pyfrom pyramid.view import view_config from pyramid.events import ContextFound # ApplicationCreated from pyramid.events import subscriber # from webapps.exceptions import RequestError # from datetime import datetime # from openbm.plugins import dataset_auth from pyramid.httpexceptions import (HTTPException, HTTPCreated, HTTPMovedPermanently, HTTPUnauthorized, # HTTPNotFound, HTTPInternalServerError, exception_response ) # from pyramid.view import forbidden_view_config # from pyramid.security import forget # import faulthandler from .resources import (UserList, User, Group, Node, NodeList, ScheduleList, Schedule, JobList, Job, JobSetList, JobSet, # Groups, # Calendars, Cluster ) @subscriber(ContextFound) def mysubscriber(event): if event.request.path_info == '/cluster': return True follower = event.request.scheduler('find_leader') if follower: raise HTTPMovedPermanently(location='http://' + ':'.join(map(str, follower)) + event.request.path_info) # @view_config(name='auth', request_method='POST', # request_param=('userid', 'passwd')) # def login(request): # """Returns Hello in JSON.""" # userid = request.POST['userid'] # passwd = request.POST['passwd'] # if request.registry.auth.check_credentials(userid, passwd): # groups = request.registry.auth.user_groups(userid) # return request.create_jwt_token(userid, groups=groups) # else: # raise HTTPUnauthorized() # Context UserList @view_config(context=UserList, request_method='GET', permission='admin_user') def view_userlist(request): return {'status': 'success', 'data': request.context.get_list()} # Context User @view_config(context=User, request_method='GET', permission='view_user') def view_user(request): return {'status': 
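For orientation, a minimal sketch of how Pyramid traversal walks these resource classes. It is hypothetical and not part of the tree: DummyRequest comes from pyramid.testing, and the scheduler attribute is a stand-in for the RPC hook added in webserver.py.

# Hypothetical traversal of GET /jobsets/nightly:
from pyramid import testing

from openbm.major.webapps.resources import Root

request = testing.DummyRequest()
request.scheduler = lambda *args, **kwargs: None   # stand-in for send_cmd

root = Root(request)
jobset = root['jobsets']['nightly']   # Root -> JobSetList -> JobSet
assert jobset.__name__ == 'nightly'
# Pyramid then invokes the view registered for context=JobSet, after
# checking the requested permission against JobSet.__acl__.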
# ===== openbm/major/webapps/webapi.py =====
from pyramid.view import view_config
from pyramid.events import ContextFound  # ApplicationCreated
from pyramid.events import subscriber
# from webapps.exceptions import RequestError
# from datetime import datetime
# from openbm.plugins import dataset_auth
from pyramid.httpexceptions import (HTTPException, HTTPCreated,
                                    HTTPMovedPermanently, HTTPUnauthorized,
                                    # HTTPNotFound,
                                    HTTPInternalServerError,
                                    exception_response
                                    )
# from pyramid.view import forbidden_view_config
# from pyramid.security import forget
# import faulthandler

from .resources import (UserList, User, Group, Node, NodeList,
                        ScheduleList, Schedule, JobList, Job,
                        JobSetList, JobSet,
                        # Groups,
                        # Calendars,
                        Cluster
                        )


@subscriber(ContextFound)
def mysubscriber(event):
    if event.request.path_info == '/cluster':
        return True
    follower = event.request.scheduler('find_leader')
    if follower:
        raise HTTPMovedPermanently(location='http://' +
                                   ':'.join(map(str, follower)) +
                                   event.request.path_info)


# @view_config(name='auth', request_method='POST',
#              request_param=('userid', 'passwd'))
# def login(request):
#     """Returns Hello in JSON."""
#     userid = request.POST['userid']
#     passwd = request.POST['passwd']
#     if request.registry.auth.check_credentials(userid, passwd):
#         groups = request.registry.auth.user_groups(userid)
#         return request.create_jwt_token(userid, groups=groups)
#     else:
#         raise HTTPUnauthorized()


# Context UserList
@view_config(context=UserList, request_method='GET',
             permission='admin_user')
def view_userlist(request):
    return {'status': 'success', 'data': request.context.get_list()}


# Context User
@view_config(context=User, request_method='GET', permission='view_user')
def view_user(request):
    return {'status': 'success', 'data': request.context.get_user()}


@view_config(context=User, request_method='PUT',
             request_param=('password',), permission='admin_user')
def create_user(request):
    request.context.add_user(**request.POST)
    raise HTTPCreated('user created')


@view_config(context=User, request_method='DELETE',
             permission='admin_user')
def delete_user(request):
    return {'status': 'success', 'data': request.context.delete_user()}


# Context Group
@view_config(context=Group, request_method='GET', permission='view_group')
def view_group(request):
    return {'status': 'success', 'data': request.context.get_group()}


@view_config(context=Group, request_param=('description',),
             request_method='PUT', permission='create_group')
def create_group(request):
    request.context.create_group(request.POST['description'])
    raise HTTPCreated('group created')


@view_config(context=Group, request_method='DELETE',
             permission='create_group')
def delete_group(request):
    request.context.delete_group()


# Context NodeList
@view_config(context=NodeList, request_method='GET',
             permission='view_node')
def view_nodelist(request):
    return {'status': 'success',
            'data': [dict(node) for node in request.context.get_list()]}


# Context Node
@view_config(context=Node, request_method='PATCH',
             request_param=('inits',), permission='config_node')
def modify_node(request):
    return {'status': 'success',
            'data': request.context.modify_node(int(request.POST['inits']))}


# Context JobSetList
@view_config(context=JobSetList, request_method='GET',
             permission='view_jobset')
def view_jobsetlist(request):
    request.context.get_list()


# Context JobSet
@view_config(context=JobSet, request_method='GET',
             permission='view_jobset')
def view_jobset(request):
    request.context.get_jobset()


@view_config(context=JobSet, request_method='PUT',
             request_param=('starttime', 'endtime', 'totalslots'),
             permission='config_jobset')
def add_jobset(request):
    request.context.add_jobset(request.POST['starttime'],
                               request.POST['endtime'],
                               request.POST['totalslots'],
                               request.POST['timezone'])
    return HTTPCreated(json_body={'status': 'success',
                                  'data': 'jobset added'})


@view_config(context=JobSet, request_method='PATCH',
             permission='config_jobset')
def modify_jobset(request):
    return {'status': 'success',
            'data': request.context.modify_jobset(request.POST)}


@view_config(context=JobSet, request_method='DELETE',
             permission='config_jobset')
def delete_jobset(request):
    return {'status': 'success', 'data': request.context.delete_jobset()}


# Context ScheduleList
@view_config(context=ScheduleList, request_method='GET',
             request_param=('query',), permission='view_schedule')
def view_schedulelist(request):
    return {'status': 'success',
            'data': (request.registry.settings['timezone'],
                     request.context.get_list(request.GET['query']))}


# Context Schedule
@view_config(context=Schedule, name='details', request_method='GET',
             permission='view_schedule')
def view_details(request):
    return request.context.details()


@view_config(context=Schedule, name='abort', request_method='PUT',
             permission='view_schedule')
def view_abort(request):
    return request.context.abort()


@view_config(context=Schedule, name='source', request_method='GET',
             permission='view_schedule')
def view_source(request):
    return {'status': 'success', 'data': request.context.source()}


# Context JobList
@view_config(context=JobList, request_method='GET', permission='view_job')
def view_jobs(request):
    return {'status': 'success', 'data': request.context.get_jobs()}


@view_config(context=JobList, request_method='POST',
             permission='config_job')
def add_job(request):
    jobspec = request.POST['jobspec']
    request.context.add_job(jobspec)
    return HTTPCreated(json_body={'status': 'success', 'data': 'job added'})


@view_config(context=JobList, request_method='POST',
             request_param=('validate',), permission='config_job')
def validate_job(request):
    jobspec = request.POST['jobspec']
    request.context.validate_job(jobspec)


@view_config(context=JobList, request_method='PUT',
             permission='config_job')
def modify_job(request):
    jobspec = request.POST['jobspec']
    request.context.add_job(jobspec, replace=True)
    return {'status': 'success', 'data': 'job modified'}


# Context Job
@view_config(context=Job, request_method='GET', permission='view_job')
def view_job(request):
    return {'status': 'success', 'data': request.context.get_job()['spec']}


@view_config(context=Job, request_method='DELETE', permission='config_job')
def delete_job(request):
    return {'status': 'success', 'data': request.context.delete_job()}


@view_config(context=Job, name='schedule', request_method='POST',
             permission='config_job')
def add_schedule(request):
    jobid = request.context.add_schedule(request.POST.get('schedule'))
    return HTTPCreated(json_body={'status': 'success', 'data': jobid})


@view_config(context=Job, name='refresh_schedule', request_method='PUT',
             permission='config_job')
def refresh_schedule(request):
    return {'status': 'success', 'data': request.context.refresh_schedule()}


# Context Cluster
@view_config(context=Cluster, request_method='GET',
             permission='view_cluster')
def cluster_info(request):
    # get_leader()
    return request.scheduler('cluster_status')


@view_config(context=HTTPException)
def error_view(exc, request):
    """Map any HTTP exception to the proper JSON error response."""
    if exc.code == 403 and request.authenticated_userid is None:
        exc = HTTPUnauthorized()
    if type(exc.detail) is list or type(exc.detail) is dict:
        message = exc.detail
    elif exc.detail:
        message = str(exc.detail)
    else:
        message = exc.explanation
    if exc.code <= 399:
        json_body = {'status': 'success', 'code': exc.code, 'data': message}
    else:
        json_body = {'status': 'error', 'code': exc.code, 'message': message}
    return exception_response(exc.code, json_body=json_body)


@view_config(context=Exception)
def exception_view(exc, request):
    raise HTTPInternalServerError()
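A client-side sketch of the API above, using the third-party requests library. The host, port and credentials are assumptions (they come from the deployment's configuration), and the response envelope follows the {'status': ..., 'data': ...} shape the views return.

import requests

BASE = 'http://127.0.0.1:8080'   # assumed om_address:om_port
AUTH = ('root', 'secret')        # assumed BasicAuth credentials

# List users (GET on the UserList context):
print(requests.get(f'{BASE}/users', auth=AUTH).json())

# Add a job from a YAML spec (POST on the JobList context):
resp = requests.post(f'{BASE}/jobs',
                     data={'jobspec': open('job.yaml').read()},
                     auth=AUTH)
print(resp.status_code, resp.json())   # 201 with a JSON body on success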
# ===== openbm/major/webapps/webserver.py =====
import os
import logging
import threading

from cheroot import wsgi
import psutil
from pyramid.config import Configurator
from pyramid.renderers import JSON
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.authentication import BasicAuthAuthenticationPolicy as BasicAuth

from openbm.major.exceptions import InternalSchedulerError
# from openbm.major import dist_logging
from openbm.plugins.load import (load_auth_backend, load_schema)
from openbm.major.webapps.resources import Root
from openbm.minor import schemas

logger = logging.getLogger(__name__)


def send_cmd(request, *args, **kwargs):
    endpoint.send((args, kwargs))
    repl = endpoint.recv()
    if isinstance(repl, InternalSchedulerError):
        raise repl.re_raise()
    else:
        return repl


def check_credentials(backend):
    def check_credentials_backend(user, passwd, request):
        return backend.check_credentials(user, passwd)
    return check_credentials_backend


def wsgi_webapi(config):
    logger.debug('Creating webapi wsgi application')
    authz_policy = ACLAuthorizationPolicy()
    json_renderer = JSON()
    # json_renderer.add_adapter(apscheduler.job.Job, job_adapter)
    settings = Configurator()
    settings.set_root_factory(Root)
    auth_backend = load_auth_backend(config)
    # backend.load_pyramid(config, settings)
    settings.add_request_method(send_cmd, 'scheduler', reify=False)
    settings.add_renderer(None, json_renderer)
    settings.add_settings({'timezone': config['major']['timezone']})
    settings.registry.config = config
    settings.registry.auth = auth_backend
    authn_policy = BasicAuth(check_credentials(auth_backend))
    settings.set_authentication_policy(authn_policy)
    settings.set_authorization_policy(authz_policy)
    # config.add_settings({'exclog.extra_info': 'true'})
    settings.add_settings(
        {'exclog.ignore': """pyramid.httpexceptions.HTTPSuccessful
                             pyramid.httpexceptions.HTTPRedirection
                             pyramid.httpexceptions.HTTPClientError
                             pyramid.httpexceptions.HTTPNotImplemented
                          """})
    settings.include('pyramid_exclog')
    # config.include("pyramid_jwt")
    # config.include("pyramid_httpauth")
    # config.set_jwt_authentication_policy('secret')
    settings.scan("openbm.major.webapps")
    # faulthandler.dump_traceback_later(5)
    return settings.make_wsgi_app()


def parent_notifier():
    server = psutil.Process()
    parent = server.parent()
    logger.debug('Parent notifier thread is running')
    parent.wait()
    server.terminate()


def run(config, cmdpipe, logpipe, event):
    # logger.addHandler(dist_logging.PipeHandler(logpipe))
    notifier = threading.Thread(target=parent_notifier)
    notifier.start()
    logger.debug(f'Operation Manager PID: {os.getpid()}')
    address = config.get('major', 'om_address')
    port = config.getint('major', 'om_port')
    global endpoint
    endpoint = cmdpipe
    logger.debug(f'Operation Manager will listen on {address}:{port}')
    server = wsgi.Server((address, port), wsgi_webapi(config), numthreads=1)
    for executor in load_schema('executors'):
        logger.debug(f'Loading schema for executor {executor.name}')
        schemas.add_executor_plugins(executor.name, executor.plugin.SCHEMA)
    for resolver in load_schema('resolvers'):
        logger.debug(f'Loading schema for resolver {resolver.name}')
        schemas.add_resolver_plugins(resolver.name, resolver.plugin.SCHEMA)
    logger.info(f'Operation Manager is listening on {address}:{port}')
    try:
        event.set()
        server.start()
    except KeyboardInterrupt:
        logger.info('Stopping Operation Manager')
        send_cmd(None, None)
    except Exception as ex:
        logger.critical(ex)
        send_cmd(None, None)
# ===== openbm/minor/__init__.py =====
#!/usr/bin/env python
#
# Copyright (C) 2016 Oscar Curero
#
# This file is part of OpenBatchManager.
#
# OpenBatchManager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenBatchManager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenBatchManager.  If not, see <http://www.gnu.org/licenses/>.
#
import asyncio
# from asyncio import FIRST_COMPLETED
import itertools
import logging
import os
import pickle
# import pytz
# import time

from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.executors.pool import ProcessPoolExecutor
from apscheduler import events
# from apscheduler.util import asint
from boolrule import BoolRule
import pendulum
import pydblite
import websockets

from openbm.exceptions import (JobError, JobAbend,
                               )
from openbm.plugins.load import (load_executors)
# from websockets.exceptions import ConnectionClosed
# from openbm.utils import dsndate_fmt

loop = asyncio.get_event_loop()
logger = logging.getLogger('openbm')
# logging.getLogger('apscheduler.executors.node').addFilter(r)


class MinorScheduler(AsyncIOScheduler):

    def __init__(self, config, **options):
        logger.debug(f'Starting a {self.__class__.__name__} scheduler')
        self.config = config
        # executors = {'mgr': ThreadPoolExecutor(10)}
        self.name = config['minor'].pop('node_name')
        self.datadir = config['minor'].pop('data_dir')
        if not os.path.exists(os.path.join(self.datadir, 'joblogs')):
            os.mkdir(os.path.join(self.datadir, 'joblogs'))
        self.security = config['minor'].getboolean('security', 'on')
        self.cert = config['minor'].pop('certificate')
        self.usedinits = 0
        # self.addr = config.pop('address')
        # self.port = config.pop('port')
        # self.tz = config['timezone']  FIXME
        # logger.info(f'Scheduler starting in {self.tz} timezone')
        logging.getLogger('asyncio').setLevel(logging.DEBUG)
        logger.debug(f'Process PID {os.getpid()}')
        super().__init__(**options)
        local_jobs_path = os.path.join(self.datadir, 'localjobs.db')
        self.local_jobs = pydblite.Base(local_jobs_path)
        if self.local_jobs.exists():
            self.local_jobs.open()
        else:
            self.local_jobs.create('jobid', 'step', 'executor')
            self.local_jobs.create_index('jobid')

    async def start(self):
        # self.add_listener(self._event_subjob, mask=events.EVENT_JOB_ADDED)
        self.add_listener(self._event_execjob,
                          mask=events.EVENT_JOB_SUBMITTED)
        # self.add_listener(self._event_abendjob,
        #                   mask=events.EVENT_JOB_ERROR)
        # self.add_listener(self._event_endedjob,
        #                   mask=events.EVENT_JOB_EXECUTED)
        major_ready = asyncio.Event()
        asyncio.get_event_loop().create_task(self._ws_handle(major_ready))
        await major_ready.wait()
        # await self._ws_handle()
        self.executors = load_executors(self)
        super().start()
        logger.info(f'OpenBatchManager is now active in node {self.name}')

    async def stop(self):
        pass

    def _configure(self, config):
        if 'executors' not in config:
            config['executors'] = {}
        if 'jobstores' not in config:
            config['jobstores'] = {}
        if 'triggers' not in config:
            config['triggers'] = {}
        config['jobstores']['int'] = MemoryJobStore()
        config['executors']['jobs'] = ProcessPoolExecutor(5)
        # config['timezone'] = self.tz  FIXME
        config['job_defaults'] = {'coalesce': False}
        # self.mgr_sch.start()
        super()._configure(config)

    async def _ws_handle(self, major_ready):
        proto = 'wss://' if self.security else 'ws://'
        servers = self.config['minor']['servers'].split(',')
        for connection in itertools.cycle(servers):
            logger.debug(f'Connecting to {proto}{connection}')
            server = websockets.connect(f'{proto}{connection}/{self.name}')
            try:
                self.ws = await asyncio.wait_for(server, 5)
                # await self.ws.send('2')
                major_ready.set()
                logger.info(f'Connected to major node {connection}')
                await self._ws_recv()
            except websockets.exceptions.ConnectionClosed as ex:
                if ex.code == 4000:
                    logger.critical(f'Node {self.name} already'
                                    f' exists in server {connection}')
                    return
            except Exception as ex:
                await server.close()
                if isinstance(ex, asyncio.TimeoutError):
                    ex = 'timeout'
                logger.error(f'Connection to {connection} failed: {ex}')
                await asyncio.sleep(5)

    async def _ws_recv(self):
        while True:
            message = await self.ws.recv()
            method, args, kwargs, need_response = pickle.loads(message)
            response = await self.dispatcher[method](*args, **kwargs)
            logger.debug(f'Handled {method} from major node')
            if need_response:
                logger.debug(f'Sending response for {method}')
                await self.ws.send(pickle.dumps(response))

    def _event_subjob(self, event):
        if event.jobstore == 'major':
            logger.debug(f'Job {event.job_id} submitted')
            self._manage_jobstatus(event.job_id, {'status': 'SCH'})

    def _event_execjob(self, event):
        if event.jobstore == 'major':
            self.usedinits += 1
            logger.debug(f'Job {event.job_id} executed')
            self._manage_jobstatus(event.job_id, {'status': 'SUB'})
            if 'MajorScheduler' in [cls.__name__
                                    for cls in self.__class__.mro()]:
                self.node_mgr.modify_node(self.name,
                                          usedinits=self.usedinits)

    def _event_abendjob(self, event):
        if event.jobstore == 'major':
            self.usedinits -= 1
            logger.debug(f'Job {event.job_id} has ended abnormally')
            self._manage_jobstatus(event.job_id, {'status': 'ABD'},
                                   {'rc': str(event.exception)})
            if 'MajorScheduler' in [cls.__name__
                                    for cls in self.__class__.mro()]:
                self.node_mgr.modify_node(self.name,
                                          usedinits=self.usedinits)

    def _event_endedjob(self, event):
        if event.jobstore == 'major':
            self.usedinits -= 1
            logger.debug(f'Job {event.job_id} ended')
            if not event.retval:
                event.retval = 'END_OK'
            self._manage_jobstatus(event.job_id, {'status': 'FOK'},
                                   {'rc': event.retval})
            if 'MajorScheduler' in [cls.__name__
                                    for cls in self.__class__.mro()]:
                self.node_mgr.modify_node(self.name,
                                          usedinits=self.usedinits)

    # @staticmethod
    async def _execjob(self, jobid, jobspec):
        self._manage_jobstatus(jobid, {'status': 'EXE'},
                               {'sta_time': pendulum.now('UTC')})
        logger.info(f'Job {jobid} is running')
        joblog = logging.getLogger(jobid)
        joblog_file = logging.FileHandler(
            os.path.join(self.datadir, 'joblogs', f'{jobid}'))
        joblog_file.setFormatter(logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s'))
        joblog.addHandler(joblog_file)
        joblog.info(f'Job {jobid} started on node {self.name}')
        env = {'params': {}, 'exception': None, 'steps': {}, 'vars': {}}
        for num, step in enumerate(jobspec['source']):
            name = step.get('id', f'step{num}')
            joblog.info(f'Step {name} started')
            runcond = BoolRule(step.get('runcond', 'exception is None'))
            abdrule = step.get('abdcond', f'steps.{name}.exception is None')
            if not runcond.test(env):
                env['steps'][name] = {'exception': 'Flush'}
                logger.info(f'Step {name} FLUSHed for job {jobid}')
                continue
            try:
                exec_name = step.get('type', 'cmd')  # default executor
                if exec_name not in self.executors.entry_points_names():
                    raise JobError() from \
                        ValueError(f'executor {exec_name} not found')
                self.local_jobs.insert(jobid, num, exec_name)
                self.local_jobs.commit()
                logger.info(f'Started {name} step for job {jobid}')
                executor = self.executors.map_method(
                    lambda ext, *args: exec_name == ext.name,
                    'exec_step', jobid, step, env)[0]
                ret = await executor or {}
                env['steps'][name] = {'exception': None, **ret}
            except (JobAbend, JobError) as ex:
                exc_name = ex.__class__.__name__
                exc_info = repr(ex) if isinstance(repr(ex), dict) else {}
                env['steps'][name] = {'exception': exc_name, **exc_info}
                last_exc = ex
            finally:
                if not BoolRule(abdrule).test(env):
                    if env['steps'][name]['exception']:
                        env['exception'] = env['steps'][name]['exception']
                    else:
                        env['exception'] = ValueError('ABDCOND')
                    last_exc = env['exception']
                    logger.info(f'Step {name} failed at condition {abdrule}')
                else:
                    logger.info(f'Ended {name} step for job {jobid}')
        if env['exception']:
            logger.info(f'Job {jobid} ENDED ABNORMALLY')
            rc = str(last_exc)
            status = 'ABD'
        else:
            logger.info(f'Job {jobid} ended')
            rc = 'END_OK'
            status = 'FOK'
        self._manage_jobstatus(jobid, {'status': status},
                               {'rc': rc,
                                'sto_time': pendulum.now('UTC')})
        self.local_jobs.delete(self.local_jobs(jobid=jobid))

    async def abort_job(self, jobid):
        logger.debug(f'Abort task started for jobid {jobid}')
        job = self.local_jobs._jobid[jobid][0]
        executor = self.executors.map_method(
            lambda ext, *args: job['executor'] == ext.name,
            'abort_job', jobid)[0]
        await executor
        logger.info(f'Jobid {jobid} has been aborted')

    def _manage_jobstatus(self, jobid, status, rundata={}):
        logger.debug(f'Job {jobid} status change: {status}')
        # TODO: Use RPC to execute manage_jobstatus remotely

    def recv_job_proc(self, coro=None):
        coro.set_daemon()
        logger.debug('receive job queue started')
        while True:
            jobid, runid, jobspec = yield coro.receive()
            super().add_job(self._execjob, jobstore='node',
                            id=f'{jobid}:{runid}',
                            args=(jobspec, jobid, runid),
                            executor='process')
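The shape of the job specification consumed by _execjob can be read off the loop above. The fragment below is an invented example showing the per-step fields (id, type, runcond, abdcond); runcond and abdcond are boolrule expressions evaluated against the env dict built up during the run.

# Invented jobspec fragment as _execjob iterates it:
jobspec = {'source': [
    {'id': 'extract', 'type': 'cmd', 'cmd': 'dump-data /tmp/out.csv'},
    {'id': 'load', 'type': 'cmd', 'cmd': 'load-data /tmp/out.csv',
     'runcond': 'exception is None',               # FLUSH the step otherwise
     'abdcond': 'steps.load.exception is None'},   # abend the job if it fails
]}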
# from schema import ( And, Forbidden, Optional, Or, Regex, Schema, ) definitions = {'base_schedule': { Optional('type'): Or('hourly', 'daily', 'weekly', 'monthly', 'yearly'), Optional('festive'): Or('yes', 'no', 'only', 'day-before', 'day-after'), Optional('target'): str, Optional('start_date'): str, Optional('end_date'): str, Optional('timezone'): str, Optional('year'): int, Optional('month'): And(int, lambda m: 0 < m <= 12), Optional('day'): And(int, lambda d: 0 < d <= 31), Optional('week'): And(int, lambda w: 0 < w <= 52), Optional('day_of_week'): Or(str, int), Optional('hour'): And(int, lambda h: 0 <= h <= 23), }, 'base_step': { Optional('id'): And(str, lambda s: s[0].isalpha()), Optional('params'): dict, Optional('runcond'): str, Optional('abdcond'): str, }, 'base_prereq': { Optional('conditional'): bool, }, 'valid_execs': [], 'valid_resolvs': [], } stepschema = [ And([Schema({**definitions['base_step'], **{Optional('type'): lambda s: s not in definitions['valid_execs']}}, ignore_extra_keys=True)], lambda s: len(s) > 0)] prereqsschema = [] job = { 'name': str, 'owner': str, 'steps': Or(*stepschema), Optional('jobset'): And([str], lambda l: len(l) > 0), Optional('description'): str, Forbidden('scheduler'): object, Optional('host'): str, Optional('prereqs'): Or(*prereqsschema), Optional('provides'): And([str], lambda s: len(s) > 0), Optional('schedule'): Or(str, And([Schema( {**definitions['base_schedule'], **{Optional('minute'): Regex('\*|/|-'), Optional('second'): Regex('\*|/|-'), }}, ignore_extra_keys=True)], lambda s: len(s) > 0), And([Schema( {**definitions['base_schedule'], **{Optional('minute'): Regex('\*|/|-'), Optional('second'): int, }}, ignore_extra_keys=True)], lambda s: len(s) > 0), And([Schema( {**definitions['base_schedule'], **{Optional('minute'): Regex('\*|/|-'), Optional('second'): int, }}, ignore_extra_keys=True)], lambda s: len(s) > 0), And([Schema( {**definitions['base_schedule'], **{Optional('minute'): int, Optional('second'): int, }}, ignore_extra_keys=True)], lambda s: len(s) > 0), ) } jobschema = Schema(job, ignore_extra_keys=True) def add_resolver_plugins(name, resolver_schema): if resolver_schema: if name == 'job': # Default resolver resolver_type = {Optional('type'): 'job'} else: resolver_type = {'type': lambda s: s == name} resolver_schema = And([Schema({**definitions['base_prereq'], **resolver_type, **resolver_schema}, ignore_extra_keys=True)], lambda s: len(s) > 0) prereqsschema.append(resolver_schema) global job job[Optional('prereqs')] = Or(*prereqsschema) global jobschema jobschema = Schema(job, ignore_extra_keys=True) definitions['valid_resolvs'].append(name) def add_executor_plugins(name, executor_schema): if executor_schema: if name == 'cmd': # Default executor executor_type = {Optional('type'): 'cmd'} else: executor_type = {'type': name} executor_schema = And([Schema({**definitions['base_step'], **executor_type, **executor_schema}, ignore_extra_keys=True)], lambda s: len(s) > 0) stepschema.append(executor_schema) global job job['steps'] = Or(*stepschema) global jobschema jobschema = Schema(job, ignore_extra_keys=True) definitions['valid_execs'].append(name) PK!Sq VVopenbm/minor/schemas.py.orig#!/usr/bin/env python # # Copyright (C) 2016 Oscar Curero # # This file is part of OpenBatchManager. # # OpenBatchManager free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# ===== openbm/openbmd.py =====
#!/usr/bin/env python
#
# Copyright (C) 2015 social2data S.A.
#
# This file is part of OpenBatchManager.
#
# OpenBatchManager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenBatchManager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenBatchManager.  If not, see <http://www.gnu.org/licenses/>.
#
import asyncio
import configparser
import logging

import click

from openbm import (minor,
                    major
                    )

loop = asyncio.get_event_loop()


@click.command()
@click.argument('config_file', type=click.Path(exists=True))
@click.option('--initialize', 'init', is_flag=True, default=False,
              help='initialize job scheduling')
@click.option('--loglevel', '-l', 'log_level',
              type=click.Choice(['critical', 'error', 'warning',
                                 'info', 'debug']),
              default='info', help='set verbose level (default "info")')
def main(config_file, init, log_level):
    """Execute an openbm scheduler"""
    logformat = '%(asctime)s%(msecs)d %(name)s:%(levelname)s:%(message)s'
    log_level = getattr(logging, log_level.upper())
    logging.basicConfig(level=log_level, format=logformat)
    logger = logging.getLogger('openbm')
    if logging.getLogger().level > logging.DEBUG:
        logging.getLogger('apscheduler').setLevel(logging.ERROR)
    logger.info('Starting OpenBatchManager version 0.1')
    config = configparser.ConfigParser()
    config.read(config_file)
    if (config.has_section('major') and
            config.getboolean('major', 'server', fallback=False)):
        if config.getboolean('major', 'cluster'):
            scheduler = major.ClusterMajorScheduler(config)
        else:
            scheduler = major.StandaloneMajorScheduler(config)
    else:
        scheduler = minor.MinorScheduler(config)
    logger.debug('Initialization completed')
    loop.run_until_complete(scheduler.start(init))
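openbmd reads a configparser INI file. The sketch below collects the sections and options actually consulted by the code above and by the schedulers; the option names are real, the values are placeholders.

import configparser

SAMPLE_CONFIG = """
[major]
server = true
cluster = false
timezone = UTC
om_address = 127.0.0.1
om_port = 8080
auth_backend = insecure

[insecure_auth]
email = admin@example.com

[minor]
node_name = node1
data_dir = /var/lib/openbm
security = off
certificate =
servers = 127.0.0.1:8081
"""

config = configparser.ConfigParser()
config.read_string(SAMPLE_CONFIG)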
# ===== openbm/plugins/__init__.py =====
# (empty)

# ===== openbm/plugins/auth/__init__.py =====
import abc
import logging
from dataclasses import dataclass, field

auth_logger = logging.getLogger('auth_backend')


class SecurityError(BaseException):
    pass


class NotImplemented(SecurityError):
    def __init__(self):
        super().__init__('sorry, feature not implemented')


class NoSuchUser(SecurityError):
    def __init__(self, msg):
        super().__init__(f'user {msg} does not exist')


class NoSuchGroup(SecurityError):
    def __init__(self, msg):
        super().__init__(f'group /{msg} does not exist')


class DuplicateUser(SecurityError):
    def __init__(self, msg):
        super().__init__(f'user {msg} already exists')


class DuplicateGroup(SecurityError):
    def __init__(self, msg):
        super().__init__(f'group /{msg} already exists')


class DeleteGroupChildren(SecurityError):
    def __init__(self, msg):
        super().__init__(f'group /{msg} contains children')


@dataclass(frozen=True)
class UserBase:
    userid: str
    name: str
    groups: tuple
    preferredLanguage: str = field(init=False)


class RootGroup(object):
    name = 'root'
    description = 'Root group'


class SecurityBase(metaclass=abc.ABCMeta):
    """Base class for authentication/authorization backend plugins."""

    @abc.abstractmethod
    def test_plugin(self):
        """Used in tests"""

    @abc.abstractmethod
    def check_credentials(self, userid, password, domain=None):
        """Validate the given credentials.

        :returns: the user's principals on success, or None.
        """

    @abc.abstractmethod
    def user_groups(self, userid, domain=None):
        """Return the groups the given user belongs to."""

    @abc.abstractmethod
    def delete_user(self, userid):
        """Delete the given user."""

    @abc.abstractmethod
    def delete_group(self):
        """Delete the given group."""
# ===== openbm/plugins/auth/insecure.py =====
import logging
from dataclasses import dataclass

from openbm.plugins import auth

logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class User(auth.UserBase):
    email: str


class InsecureAuth(auth.SecurityBase):

    def test_plugin():
        return InsecureAuth({'email': None})

    def __init__(self, config):
        logger.warning('Use of the insecure backend, as its name implies,'
                       ' is insecure')
        if 'email' not in config:
            raise ValueError('email option not found in insecure_auth'
                             ' section')
        self.user = User(userid='root', name='root', email=config['email'],
                         groups=('root',))
        super().__init__()

    def check_credentials(self, userid, passwd):
        return ['r:admin', '/root']

    def create_user(self, userid, passwd, **kwargs):
        raise auth.NotImplemented()

    def create_group(self, group_path, desc):
        raise auth.NotImplemented()

    def delete_user(self, userid):
        raise auth.NotImplemented()

    def delete_group(self, group_path):
        raise auth.NotImplemented()

    def change_password(self, userid, oldpasswd, newpasswd):
        raise auth.NotImplemented()

    def get_all_users(self):
        return ['root']

    def get_user(self, userid):
        return self.user

    def get_group(self, group_path):
        return {'group': 'root', 'path': '/root', 'children': [],
                'group_description': 'root', 'users': 'root'}

    def add_user_group(self, groupid, userid):
        raise auth.NotImplemented()

    def user_groups(self, userid):
        return ['root']

    def group_users(self, group_path):
        return [self.user]
# ===== openbm/plugins/auth/sqlalchemy.py =====
# import AuthnPlugin
# from yapsy.IPlugin import IPlugin
# from simpleplugins import Plugin
import itertools

# from pyramid_sqlalchemy import BaseObject
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import transaction
from sqlalchemy import Column, ForeignKey, engine_from_config
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.types import Integer, String, Time, Binary
# from pyramid_sqlalchemy import Session, metadata, init_sqlalchemy

from openbm.plugins import auth

Base = declarative_base()
Session = sessionmaker()


class OBMUser(Base):
    __tablename__ = 'obm_users'

    userid = Column(String(255), primary_key=True)
    passwd = Column(String(255))
    name = Column(String(255))
    # surname = Column(String(255))
    email = Column(String(255))
    phone1 = Column(String(15))
    phone2 = Column(String(15))
    phone3 = Column(String(15))
    photo = Column(Binary)
    lang = Column(String(5))
    last_login = Column(Time)


class OBMGroup(Base):
    __tablename__ = 'obm_groups'

    groupid = Column(Integer, primary_key=True)
    name = Column(String(255))
    parent = Column(Integer, ForeignKey('obm_groups.groupid'))
    description = Column(String(255))
    parent_rel = relationship('OBMGroup', remote_side=[groupid],
                              backref='children_rel')
    users_rel = relationship('OBMUserGroups')

    @hybrid_property
    def users(self):
        return [user.user for user in self.users_rel]

    @hybrid_property
    def childrens(self):
        if self.groupid == 0:
            name = '/'
        else:
            name = self.name
        if self.children_rel:
            path = '/'.join([group.childrens for group in self.children_rel])
            return [name, '/' + path]
        else:
            return name


class OBMUserGroups(Base):
    __tablename__ = 'obm_usergroups'

    groupid = Column(Integer, ForeignKey('obm_groups.groupid'),
                     primary_key=True)
    userid = Column(String(255), ForeignKey('obm_users.userid'),
                    primary_key=True)
    group_rel = relationship('OBMGroup', uselist=False)
    user = relationship('OBMUser')

    @hybrid_property
    def group(self):
        if int(self.groupid) == -2:
            return OBMGroup(groupid=0, name='root', parent=0)
        else:
            return self.group_rel


class SQLAlchemyAuth(auth.SecurityBase):

    def test_plugin():
        config = {'url': 'sqlite:///:memory:'}
        return SQLAlchemyAuth(config)

    def __init__(self, config):
        engine = engine_from_config(config, '')
        Session.configure(bind=engine,
                          extension=ZopeTransactionExtension())
        Base.metadata.create_all(engine)
        self.session = Session()
        try:
            with transaction.manager:
                self.session.add(OBMUser(userid='root', passwd=''))
                self.session.add(OBMGroup(groupid=0, parent=-1, name='root',
                                          description='Root Group'))
                self.session.add(OBMUserGroups(groupid=0, userid='root'))
        except IntegrityError:
            pass
        super().__init__()

    def _user_exists(self, userid):
        return self.session.query(OBMUser).filter_by(userid=userid).first()

    def _group_exists(self, group_path):
        parent = 0
        if not group_path or group_path == ['root']:
            return self.session.query(OBMGroup).filter_by(groupid=0).first()
        for group_name in group_path:
            if group_name == '':
                continue
            group = self.session.query(OBMGroup).filter_by(name=group_name,
                                                           parent=parent
                                                           ).first()
            if not group:
                return False
            parent = group.groupid
        return group

    def check_credentials(self, userid, passwd):
        user = self.session.query(OBMUser).filter_by(userid=userid,
                                                     passwd=passwd).first()
        if user is not None:
            return itertools.chain(*self.user_groups(userid))
        else:
            return None

    def create_user(self, userid, passwd, **kwargs):
        try:
            with transaction.manager:
                self.session.add(OBMUser(userid=userid, passwd=passwd,
                                         **kwargs))
        except IntegrityError:
            raise auth.DuplicateUser(userid)

    def create_group(self, group, desc):
        group_path = group.split('/')
        group_name = group_path[-1]
        if self._group_exists(group_path):
            raise auth.DuplicateGroup(group)
        parent = self._group_exists(group_path[:-1])
        if not parent:
            raise auth.NoSuchGroup(group)
        with transaction.manager:
            self.session.add(OBMGroup(name=group_name,
                                      parent=parent.groupid,
                                      description=desc))

    def delete_user(self, userid):
        raise auth.NotImplemented()

    def delete_group(self, group_name):
        group_path = group_name.split('/')
        group = self._group_exists(group_path)
        if not group:
            raise auth.NoSuchGroup(group_name)
        try:
            self.session.query(OBMGroup).filter_by(parent=group.groupid).one()
            raise auth.DeleteGroupChildren('/'.join(group_path))
        except NoResultFound:
            with transaction.manager:
                self.session.delete(group)

    def change_password(self, userid, oldpasswd, newpasswd):
        if self.check_credentials(userid, oldpasswd):
            with transaction.manager:
                self.session.query(OBMUser).filter_by(
                    userid=userid).update({'passwd': newpasswd})
            return True
        else:
            return False

    def get_all_users(self):
        return [row.userid for row in self.session.query(OBMUser).all()]

    def get_user(self, userid):
        user = self._user_exists(userid)
        if not user:
            raise auth.NoSuchUser(userid)
        return {'userid': user.userid, 'name': user.name,
                'email': user.email, 'groups': self.user_groups(userid)}

    def get_group(self, group_name):
        group_path = group_name.split('/')
        group = self._group_exists(group_path)
        if not group:
            raise auth.NoSuchGroup(group_name)
        return {'group': group.name,
                'path': '/' + '/'.join(group_path),
                'children': [row.name for row in
                             self.session.query(OBMGroup).
                             filter_by(parent=group.groupid).all()],
                'group_description': group.description,
                'users': [user.userid for user in group.users]}

    def add_user_group(self, group_path, userid):
        if self._user_exists(userid):
            try:
                groupid = self._group_exists(group_path.split('/')).groupid
            except AttributeError:
                raise auth.NoSuchGroup(group_path)
            with transaction.manager:
                self.session.add(OBMUserGroups(groupid=groupid,
                                               userid=userid))
        else:
            raise auth.NoSuchUser(userid)

    def user_groups(self, userid):
        return [row.group.childrens for row in (
            self.session.query(OBMUserGroups).filter_by(userid=userid
                                                        ).all())]

    def group_users(self, group_name):
        group_path = group_name.split('/')
        group = self._group_exists(group_path)
        if not group:
            raise auth.NoSuchGroup(group_name)
        return group.users

# ===== openbm/plugins/executors/__init__.py =====
import abc


class ExecutorBase(metaclass=abc.ABCMeta):
    """Base class for step executor plugins."""

    @abc.abstractmethod
    def exec_step(self, step, env):
        pass

    @abc.abstractmethod
    def abort_job(self, jobid):
        pass

# ===== openbm/plugins/executors/cmd.py =====
import asyncio
from concurrent.futures import ThreadPoolExecutor
from subprocess import PIPE

import psutil
from schema import Optional

from openbm.exceptions import JobAbend  # , JobError
from openbm.major import loop
from openbm.plugins.executors import ExecutorBase


class Command(ExecutorBase):

    SCHEMA = {'cmd': str,
              Optional('shell'): str,
              Optional('runas'): str,
              }

    def test_plugin():
        return Command(None, None)

    def __init__(self, config, scheduler):
        self.processes = {}

    def start_job(self, jobid, runid, jobspec):
        pass

    async def exec_step(self, jobid, step, env):
        p = psutil.Popen(str(step['cmd']), stdout=PIPE, shell=True)
        with ThreadPoolExecutor() as thread_executor:
            process = loop.run_in_executor(thread_executor, p.wait)
            self.processes[jobid] = process
            try:
                await process
            except asyncio.CancelledError:
                p.kill()
                self.processes.pop(jobid)
                raise JobAbend() from RuntimeError('killed')
        self.processes.pop(jobid)
        if p.returncode > 0:
            raise JobAbend() from SystemError(p.returncode)
        else:
            return {'stdout': p.stdout.read()}

    async def abort_job(self, jobid):
        task = self.processes[jobid]
        task.cancel()
        await asyncio.wait((task,))

    def end_job(self, job):
        pass

    def on_shutdown(self):
        pass

# ===== openbm/plugins/executors/job.py =====

class Job(object):

    SCHEMA = {'name': str,
              }

    def test_plugin():
        return Job(None)

    def __init__(self, config):
        pass
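Writing a new executor comes down to subclassing ExecutorBase, exposing a SCHEMA for the step options, and registering the class under the openbm.plugins.executors entry-point namespace (e.g. in setup.py: entry_points={'openbm.plugins.executors': ['sleep = mypkg.sleep:Sleep']}). The sketch below is hypothetical: the plugin name and the 'seconds' option are invented, and the method signatures follow cmd.py rather than a documented contract.

import asyncio

from openbm.plugins.executors import ExecutorBase


class Sleep(ExecutorBase):
    """Hypothetical executor: pauses a step for N seconds."""

    SCHEMA = {'seconds': int}   # merged into the job schema at load time

    def test_plugin():
        return Sleep(None, None)

    def __init__(self, config, scheduler):
        self.tasks = {}

    async def exec_step(self, jobid, step, env):
        # `step` is the validated step dict; `env` is the shared run state
        task = asyncio.ensure_future(asyncio.sleep(step['seconds']))
        self.tasks[jobid] = task
        await task
        self.tasks.pop(jobid, None)
        return {}

    async def abort_job(self, jobid):
        self.tasks[jobid].cancel()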
# ===== openbm/plugins/executors/null.py =====
class Null(object):
    """Executor that accepts every step and does nothing."""

    SCHEMA = {}

    @staticmethod
    def test_plugin():
        return Null(None, None)

    def __init__(self, config, scheduler):
        pass

    def start_job(self, jobid, runid, jobspec):
        pass

    async def exec_step(self, jobid, step, env):
        pass

    async def abort_job(self, jobid):
        pass

    def end_job(self, runid):
        pass

    def on_shutdown(self):
        pass


# ===== openbm/plugins/executors/requests.py =====
# NOTE: this module currently mirrors cmd.py; the only difference is that
# a non-zero return code is surfaced as a dynamically named RC<code>
# exception instead of a SystemError.
import asyncio
from concurrent.futures import ThreadPoolExecutor
from subprocess import PIPE

import psutil
from schema import Optional

from openbm.exceptions import JobAbend  # , JobError
from openbm.major import loop


class Command(object):

    SCHEMA = {'cmd': str,
              Optional('shell'): str,
              Optional('runas'): str,
              }

    @staticmethod
    def test_plugin():
        return Command(None, None)

    def __init__(self, config, scheduler):
        self.processes = {}

    @staticmethod
    def _create_rc_exception(return_code):
        # Build a dynamically named exception class (RC1, RC2, ...) so
        # the return code shows up in the abend cause.
        class RCException(Exception):
            pass
        RCException.__name__ = f'RC{return_code}'
        return RCException()

    def start_job(self, jobid, runid, jobspec):
        pass

    async def exec_step(self, jobid, step, env):
        p = psutil.Popen(str(step['cmd']), stdout=PIPE, shell=True)
        with ThreadPoolExecutor() as thread_executor:
            process = loop.run_in_executor(thread_executor, p.wait)
            self.processes[jobid] = process
            try:
                await process
            except asyncio.CancelledError:
                p.kill()
                self.processes.pop(jobid)
                raise JobAbend() from RuntimeError('killed')
            self.processes.pop(jobid)
        if p.returncode > 0:
            raise JobAbend() from self._create_rc_exception(p.returncode)
        return {'stdout': p.stdout.read()}

    async def abort_job(self, jobid):
        task = self.processes[jobid]
        task.cancel()
        await asyncio.wait((task,))

    def end_job(self, job):
        pass

    def on_shutdown(self):
        pass


# ===== openbm/plugins/load.py =====
import logging

from stevedore import (dispatch,
                       driver,
                       enabled,
                       extension,
                       )

logger = logging.getLogger(__name__)


def _enabled_notifier(ext, config):
    # A notifier is enabled when its notify_<name> section exists without
    # an 'enabled' option, or when 'enabled' is explicitly true.
    if ((config.has_section(f'notify_{ext.name}')
            and not config.has_option(f'notify_{ext.name}', 'enabled'))
            or (config.has_option(f'notify_{ext.name}', 'enabled')
                and config.getboolean(f'notify_{ext.name}', 'enabled'))):
        logger.info(f'Initializing {ext.name} notifier')
        return True
    if (config.has_option(f'notify_{ext.name}', 'enabled')
            and not config.getboolean(f'notify_{ext.name}', 'enabled')):
        logger.warning(f'Notifier {ext.name} is disabled')
    return False


def load_auth_backend(config):
    name = config['major']['auth_backend']
    if f'{name}_auth' not in config:
        config[f'{name}_auth'] = {}
    return driver.DriverManager(
        namespace='openbm.plugins.auth',
        name=name,
        invoke_on_load=True,
        invoke_args=(config[f'{name}_auth'],),
    ).driver


def load_notifiers(config):
    return [notify.plugin(config[f'notify_{notify.name}'])
            for notify in enabled.EnabledExtensionManager(
                namespace='openbm.plugins.notifiers',
                check_func=lambda ext: _enabled_notifier(ext, config),
                invoke_on_load=False,
                invoke_args=(config,),
            )]


def load_executors(scheduler):
    return dispatch.DispatchExtensionManager(
        namespace='openbm.plugins.executors',
        check_func=lambda ext: True,
        invoke_on_load=True,
        propagate_map_exceptions=True,
        invoke_args=(scheduler.config, scheduler),
    )


def load_resolvers(config):
    return dispatch.DispatchExtensionManager(
        namespace='openbm.plugins.resolvers',
        check_func=lambda ext: True,
        invoke_on_load=True,
        propagate_map_exceptions=True,
        invoke_args=(config,),
    )


def load_schema(plugin_type):
    return extension.ExtensionManager(
        namespace=f'openbm.plugins.{plugin_type}',
        invoke_on_load=False,
    )


# ===== openbm/plugins/notifiers/__init__.py =====
import abc


class NotifierBase(metaclass=abc.ABCMeta):
    def __init__(self, config):
        self.event_mask = config.get('events', 'ALL').split(',')
        self.config = config

    def notify(self, recipients, message, title, **kwargs):
        """To be overridden by concrete notifiers."""

    def filter(self, event):
        # An event passes when it is listed explicitly, when the mask
        # contains ALL, or when it is not negated with a '!' prefix.
        return (event in self.event_mask
                or 'ALL' in self.event_mask
                or f'!{event}' not in self.event_mask)


# ===== openbm/plugins/notifiers/generic_notifiers.py =====
import logging

# import notifiers as gnotifiers
from notifiers.providers.email import SMTP as SMTP_backend
from notifiers.providers.telegram import Telegram as Telegram_backend
from notifiers.providers.pushbullet import Pushbullet as Pushbullet_backend
from notifiers.core import Provider, Response

from openbm.plugins import notifiers

logger = logging.getLogger(__name__)


class DummyNotifier(Provider):
    """No-op provider used by the test_plugin() hooks."""

    site_url = '/dev/null'
    base_url = '/dev/null'
    name = 'dummy'
    _schema = {'type': 'object',
               'properties': {'message': {'type': 'string', 'title': ''}},
               'additionalProperties': True}
    _required = {'required': ['message']}

    def _send_notification(self, data: dict) -> Response:
        pass


class Notifiers(notifiers.NotifierBase):

    def __init__(self, notifier_to, recipient_field, config):
        super().__init__(config)
        config.pop('enabled')
        self.notifier_to = notifier_to
        self.to_attr = recipient_field

    def notify(self, recipients, **kwargs):
        kwargs.pop('events')
        for recipient in recipients:
            if hasattr(recipient, self.to_attr):
                logger.debug(f'notifying {getattr(recipient, self.to_attr)}')
                self.notifier.notify(
                    **{**kwargs,
                       self.notifier_to: getattr(recipient, self.to_attr)})
            else:
                logger.debug(f'Notification not sent: user {recipient} does '
                             f'not have an attribute named {self.to_attr}')


class Email(Notifiers):

    @staticmethod
    def test_plugin():
        plugin = Email({'enabled': True, 'events': 'ALL',
                        'from_addr': 'noreply@localhost'})
        plugin.notifier = DummyNotifier()
        return plugin

    def __init__(self, config):
        self.notifier = SMTP_backend()
        super().__init__('to', 'email', config)

    def notify(self, recipients, message, title):
        super().notify(recipients, message=message, subject=title,
                       from_=self.config['from_addr'], **dict(self.config))


class Telegram(Notifiers):

    @staticmethod
    def test_plugin():
        plugin = Telegram({'enabled': True, 'events': 'ALL', 'token': 1234,
                           'user_field': 'phone1'})
        plugin.notifier = DummyNotifier()
        return plugin

    def __init__(self, config):
        self.notifier = Telegram_backend()
        self.token = config.pop('token')
        super().__init__('chat_id', config.get('user_field', 'email'), config)

    def notify(self, recipients, message, title):
        super().notify(recipients, message=message, token=self.token,
                       **dict(self.config))


class Pushbullet(Notifiers):

    @staticmethod
    def test_plugin():
        plugin = Pushbullet({'enabled': True, 'events': 'ALL', 'token': 1234,
                             'user_field': 'email'})
        plugin.notifier = DummyNotifier()
        return plugin

    def __init__(self, config):
        self.notifier = Pushbullet_backend()
        self.token = config.pop('token')
        super().__init__('email', config.get('user_field', 'email'), config)

    def notify(self, recipients, message, title):
        super().notify(recipients, message=message, title='',
                       token=self.token, **dict(self.config))


# ===== openbm/plugins/notifiers/testing.py =====
import pkg_resources

from openbm.plugins import notifiers

# Create the fake entry point definition.
ep = pkg_resources.EntryPoint.parse('dummy = openbm.plugins.notifiers.'
                                    'testing:TestNotifier')
# Create a fake distribution to insert into the global working_set.
d = pkg_resources.Distribution('')
# Add the mapping to the fake EntryPoint.
d._ep_map = {'openbm.plugins.notifiers': {'dummy': ep}}
# Add the fake distribution to the global working_set.
pkg_resources.working_set.add(d, 'dummy')


class TestNotifier(notifiers.NotifierBase):

    def notify(self, recipients, message, title):
        pass


# ===== openbm/plugins/resolvers/__init__.py =====
import abc


class ResolverBase(metaclass=abc.ABCMeta):
    """Base class for resolver plugins."""

    def init(self, jobid, runid, jobspec):
        pass

    @abc.abstractmethod
    def exec_step(self, step, env):
        pass

    def end_step(self, step):
        pass

    def end_job(self, job):
        pass

    def on_shutdown(self):
        pass


# ===== openbm/plugins/resolvers/job.py =====
class Job(object):

    SCHEMA = {'name': str,
              }

    @staticmethod
    def test_plugin():
        return Job(None)

    def __init__(self, config):
        pass

    def resolve_dependency(self, jobid, dep):
        # 'dep1' is considered resolved; anything else stays pending
        # with an ('NDF', 'yellow') status tuple.
        if dep == 'dep1':
            return True
        return ('NDF', 'yellow')


# ===== openbm/plugins/resolvers/user.py =====
class User(object):

    SCHEMA = {'name': str,
              }

    @staticmethod
    def test_plugin():
        return User(None)

    def __init__(self, config):
        pass

    def resolve_dependency(self, jobid, dep):
        if dep == 'dep1':
            return True
        return ('NDF', 'yellow')


# ===== openbm/server.conf =====
[minor]
# Fully qualified node name
node_name = "foobar"
# Logging level
log_level = info
# Working dir name
data_dir = dat a
# Major nodes connection list
servers = 192.168.1.1:8900,192.168.1.2:8080
# Virtual schedulers assigned to this node (leave blank for all)
#schedulers = "1,2,3"
# Node address (leave blank for all interfaces)
address = ""
# Node port (leave blank for a random one)
port = 4000

[servers]

[security]
# Security
security = enable
# Client certificate
certificate =
# Security policy
sec_policy = internal/ldap/pam

[major]
# Server type (enable/disable/allowed)
server = enable
# Cluster address (leave blank to bind to all addresses)
cluster_addr = ""
# Cluster port
cluster_port = 4001
# Operation Manager address
om_address =
# Operation Manager port
om_port = 4080
# Log every request
om_log_level = info
# Manage self-signed x509 certificates
manage_certs = true

[cluster]
server1=192.168.1.1:9000
server2=192.168.1.2:9000
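NotifierBase keeps the notifier contract equally small: the constructor
receives the plugin's notify_<name> config section and notify() delivers a
message, with event masking handled by filter(). A sketch of a custom notifier
under those assumptions; the LogNotifier name and a [notify_log] config
section are hypothetical, and the plugin would still need an entry point in
the openbm.plugins.notifiers namespace that load.py consults (see the
registration sketch further below):

import logging

from openbm.plugins import notifiers

logger = logging.getLogger(__name__)


class LogNotifier(notifiers.NotifierBase):
    """Hypothetical notifier that writes notifications to the log."""

    def notify(self, recipients, message, title):
        # NotifierBase.__init__ has already parsed the 'events' mask
        # from the [notify_log] config section.
        for recipient in recipients:
            logger.info(f'[{title}] -> {recipient}: {message}')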
# ===== openbm/server1.conf =====
[minor]
# Fully qualified node name
node_name = foobar
# Logging level
log_level = info
# Working dir name
data_dir = openbm/dat a
# Major nodes connection list
servers = 192.168.1.1:8900,192.168.1.2:8080
# Virtual schedulers assigned to this node (leave blank for all)
#schedulers = "1,2,3"
# Security
ssl = true
# Client certificate
certificate =
# Initiators assigned to this node (set 0 to disable jobs on this node)
inits = 100

[major]
# Server type
server = true
# Topology
cluster = false
# Node address
node_addr = 127.0.0.1
# Node port
node_port = 8901
# Default timezone used by the scheduler
timezone = UTC
default_scheduler = 1
# Authentication backend (sqlalchemy/ldap)
auth_backend = insecure
# Cluster address (leave blank to bind to all addresses)
cluster_addr = 127.0.0.1
# Cluster port
cluster_port = 9000
# Operation Manager address
om_address = naiandei.net
# Operation Manager port
om_port = 4080
# Log every request
om_log_level = info
# Manage self-signed x509 certificates
manage_certs = true
reload_time = 19:50:00

[cluster]
server1=127.0.0.1:9000
server2=127.0.0.1:9001
server3=127.0.0.1:9002

[insecure_auth]
email=oscar@naiandei.net

[sqlalchemy_auth]
url = sqlite:///tutorial.db

[notify_pushbullet]
enabled=true
token=o.Zv48POl9MZ6QqGvEsHc59y2fzhuMNv7a
events=ABD

[notify_email]
enabled=false
events=ALL
from_addr=no_reply@localhost

[notify_telegram]
token=492637254:AAE-Zhy444kwgniI3FidEbTYSKwOd7ug1Sw
events=ABD


# ===== openbm/server2.conf =====
[minor]
# Fully qualified node name
node_name = "foobar"
# Logging level
log_level = info
# Working dir name
data_dir = dat a
# Major nodes connection list
servers = 192.168.1.1:8900,192.168.1.2:8080
# Virtual schedulers assigned to this node (leave blank for all)
#schedulers = "1,2,3"
# Node address (leave blank for all interfaces)
address =
# Node port (leave blank for a random one)
port = 4001

[servers]

[security]
# Security
security = enable
# Client certificate
certificate =
# Security policy
sec_policy = internal/ldap/pam

[major]
# Server type (enable/disable/allowed)
server = enable
# Cluster address (leave blank to bind to all addresses)
cluster_addr = 127.0.0.1
# Cluster port
cluster_port = 9001
# Operation Manager address (leave blank to bind to all addresses)
om_address = naiandei.net
# Operation Manager port
om_port = 4081
# Log every request
om_log_level = info
# Manage self-signed x509 certificates
manage_certs = true

[cluster]
server1=127.0.0.1:9000
server2=127.0.0.1:9001
server3=127.0.0.1:9002


# ===== openbm/server3.conf =====
[minor]
# Fully qualified node name
node_name = "foobar"
# Logging level
log_level = info
# Working dir name
data_dir = dat a
# Major nodes connection list
servers = 192.168.1.1:8900,192.168.1.2:8080
# Virtual schedulers assigned to this node (leave blank for all)
#schedulers = "1,2,3"
# Node address (leave blank for all interfaces)
address =
# Node port (leave blank for a random one)
port = 4002

[servers]

[security]
# Security
security = enable
# Client certificate
certificate =
# Security policy
sec_policy = internal/ldap/pam

[major]
# Server type
server = enable
# Cluster address (leave blank to bind to all addresses)
cluster_addr = 127.0.0.1
# Cluster port
cluster_port = 9002
# Operation Manager address (leave blank to bind to all addresses)
om_address = naiandei.net
# Operation Manager port
om_port = 4082
# Log every request
om_log_level = info
# Manage self-signed x509 certificates
manage_certs = true

[cluster]
server1=127.0.0.1:9000
server2=127.0.0.1:9001
server3=127.0.0.1:9002


# ===== openbm/servera.conf =====
[minor]
# Fully qualified node name
node_name = foobar2
# Logging level
log_level = info
# Working dir name
data_dir = dat a
# Major nodes connection list
servers = 127.0.0.1:8901
# Virtual schedulers assigned to this node (leave blank for all)
#schedulers = "1,2,3"
# Security
security = off
# Client certificate
certificate =
# Default timezone used by the scheduler
timezone = UTC
# Initiators assigned to this node (set 0 to disable jobs on this node)
inits = 100


# ===== openbm/test.pdl =====
# Binary pickle artifact (test job definition data); not reproduced.

# ===== openbm/testutils.py =====
# (empty file)

# ===== openbm/tutorial.db =====
# SQLite database produced by the sqlalchemy auth backend. Recoverable
# schema: obm_users, obm_groups and obm_usergroups, matching the models
# in openbm/plugins/auth/sqlalchemy.py, seeded with the root user and
# the Root Group.

# ===== openbm-0.1.0.dist-info/ =====
# entry_points.txt, WHEEL, METADATA and RECORD are stored compressed;
# their contents are not recoverable from this dump.
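Because entry_points.txt is not recoverable, here is a plausible registration
sketch in setup.py form. It is an assumption, not the project's actual
metadata: only the four namespaces are confirmed (they are the ones
openbm/plugins/load.py queries), every left-hand plugin name is guessed from
the module names, and the Insecure class name is invented (testing.py
registers its 'dummy' notifier programmatically instead of here):

# setup.py (sketch): plausible wiring of the plugins shown above.
from setuptools import setup, find_packages

setup(
    name='openbm',
    version='0.1.0',
    packages=find_packages(),
    entry_points={
        'openbm.plugins.auth': [
            # Class name 'Insecure' is assumed; the module is real.
            'sqlalchemy = openbm.plugins.auth.sqlalchemy:SQLAlchemyAuth',
            'insecure = openbm.plugins.auth.insecure:Insecure',
        ],
        'openbm.plugins.executors': [
            'cmd = openbm.plugins.executors.cmd:Command',
            'null = openbm.plugins.executors.null:Null',
        ],
        'openbm.plugins.notifiers': [
            # Names chosen to match the notify_<name> config sections.
            'email = openbm.plugins.notifiers.generic_notifiers:Email',
            'telegram = openbm.plugins.notifiers.generic_notifiers:Telegram',
            'pushbullet = openbm.plugins.notifiers.generic_notifiers:Pushbullet',
        ],
        'openbm.plugins.resolvers': [
            'job = openbm.plugins.resolvers.job:Job',
            'user = openbm.plugins.resolvers.user:User',
        ],
    },
)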
# ===== openbm/dat a/ =====
# Runtime data directory: joblogs/Prueba_plan_2min* and
# joblogs/Prueba_plan_ya* entries plus jobs.db, jobsets.db, list.db,
# localjobs.db and network.conf.test; binary, not reproduced.
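Finally, the [notify_*] sections in server1.conf are what drives
_enabled_notifier() in openbm/plugins/load.py. A self-contained restatement of
that rule against those sections (the absent 'slack' section is illustrative):

# Enablement rule: a section without an 'enabled' option counts as
# enabled, an explicit 'enabled' value wins either way, and a missing
# section disables the notifier.
import configparser

config = configparser.ConfigParser()
config.read_string("""
[notify_pushbullet]
enabled = true
events = ABD

[notify_email]
enabled = false
events = ALL

[notify_telegram]
events = ABD
""")


def is_enabled(name):
    section = f'notify_{name}'
    if config.has_section(section) and not config.has_option(section,
                                                             'enabled'):
        return True
    return (config.has_option(section, 'enabled')
            and config.getboolean(section, 'enabled'))


assert is_enabled('pushbullet') is True   # enabled explicitly
assert is_enabled('email') is False       # disabled explicitly
assert is_enabled('telegram') is True     # section present, no 'enabled' key
assert is_enabled('slack') is False       # no section at all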