PK!j94zelt/__init__.pyfrom .zelt import deploy, rescale, delete, invoke_transformer __all__ = ["deploy", "rescale", "delete", "invoke_transformer"] PK!zelt/kubernetes/__init__.pyPK!餻)zelt/kubernetes/client.pyimport logging from typing import List, Optional, Callable from kubernetes import config from kubernetes.client import ( ExtensionsV1beta1Api, CoreV1Api, V1Namespace, ExtensionsV1beta1Deployment, V1Pod, V1Service, V1beta1Ingress, V1DeleteOptions, V1Status, V1ContainerStatus, ) from kubernetes.client.rest import ApiException from tenacity import retry, stop_after_delay, wait_fixed, retry_if_exception_type from .manifest import Manifest KUBE_API_LIST_TIMEOUT = 360 KUBE_API_DELETE_TIMEOUT = 240 KUBE_API_WAIT = 1 STATUS_NOT_FOUND = 404 DEFAULT_DELETE_OPTIONS = V1DeleteOptions(propagation_policy="Foreground") class PodNotReadyError(RuntimeError): pass class ResourceStillThereError(RuntimeError): pass def read_config(): try: config.load_kube_config() except FileNotFoundError: logging.error("Kubernetes config. not found!") raise def create_namespace(namespace: Manifest) -> V1Namespace: logging.info("Creating Namespace %r...", namespace.name) try: return CoreV1Api().create_namespace(body=namespace.body) except ApiException as err: logging.error("Failed to create Namespace %r: %s", namespace.name, err.reason) raise def delete_namespace(name: str) -> Optional[V1Status]: logging.info("Deleting Namespace %r...", name) try: CoreV1Api().delete_namespace(name=name, body=DEFAULT_DELETE_OPTIONS) except ApiException as err: if err.status == STATUS_NOT_FOUND: logging.debug("Skipping Namespace %r deletion: %s", name, err.reason) return logging.error("Failed to delete Namespace %r: %s", name, err.reason) raise await_no_resources_found(CoreV1Api().read_namespace, name=name) def create_deployment(deployment: Manifest) -> ExtensionsV1beta1Deployment: logging.info("Creating Deployment %r...", deployment.name) try: return ExtensionsV1beta1Api().create_namespaced_deployment( namespace=deployment.namespace, body=deployment.body ) except ApiException as err: logging.error("Failed to create Deployment %r: %s", deployment.name, err.reason) raise def rescale_deployment( manifest: Manifest, replicas: int ) -> ExtensionsV1beta1Deployment: logging.info("Rescaling Deployment %r to %s Replicas...", manifest.name, replicas) if replicas < 0: raise ValueError(f"Expected a positive number of Replicas, got {replicas}") logging.debug("Fetching existing Deployment %r...", manifest.name) try: deployment = ExtensionsV1beta1Api().read_namespaced_deployment( name=manifest.name, namespace=manifest.namespace ) except ApiException as err: logging.error("Failed to fetch Deployment: %s", err.reason) raise logging.debug( "Changing Deployment Replicas from %s to %s...", deployment.spec.replicas, replicas, ) deployment.spec.replicas = replicas logging.debug("Redeploying Deployment %r...", manifest.name) try: return ExtensionsV1beta1Api().replace_namespaced_deployment( name=manifest.name, namespace=manifest.namespace, body=deployment ) except ApiException as err: logging.error("Failed to redeploy Deployment %r: %s", manifest.name, err.reason) raise def delete_deployments(namespace: str) -> Optional[V1Status]: logging.info("Deleting Deployments in Namespace %r...", namespace) try: ExtensionsV1beta1Api().delete_collection_namespaced_deployment( namespace=namespace ) except ApiException as err: if err.status == STATUS_NOT_FOUND: logging.debug("Skipping Deployment deletion: %s", err.reason) return logging.error( "Failed to delete Deployments in Namespace %r: %s", namespace, err.reason ) raise await_no_resources_found( ExtensionsV1beta1Api().list_namespaced_deployment, namespace=namespace ) def create_service(service: Manifest) -> V1Service: logging.info("Creating Service %r...", service.name) try: return CoreV1Api().create_namespaced_service( namespace=service.namespace, body=service.body ) except ApiException as err: logging.error("Failed to create Service %r: %s", service.name, err.reason) raise def delete_service(name: str, namespace: str) -> Optional[V1Status]: logging.info("Deleting Service %r...", name) try: CoreV1Api().delete_namespaced_service( name=name, namespace=namespace, body=DEFAULT_DELETE_OPTIONS ) except ApiException as err: if err.status == STATUS_NOT_FOUND: logging.debug("Skipping Service %r deletion: %s", name, err.reason) return logging.error("Failed to delete Service %r: %s", name, err.reason) raise await_no_resources_found(CoreV1Api().list_namespaced_service, namespace=namespace) def create_ingress(ingress: Manifest) -> V1beta1Ingress: logging.info("Creating Ingress %r...", ingress.name) try: return ExtensionsV1beta1Api().create_namespaced_ingress( namespace=ingress.namespace, body=ingress.body ) except ApiException as err: logging.error("Failed to create Ingress %r: %s", ingress.name, err.reason) raise def delete_ingress(name: str, namespace: str) -> Optional[V1Status]: logging.info("Deleting Ingress %r...", name) try: ExtensionsV1beta1Api().delete_namespaced_ingress( name=name, namespace=namespace, body=DEFAULT_DELETE_OPTIONS ) except ApiException as err: if err.status == STATUS_NOT_FOUND: logging.debug("Skipping Ingress %r deletion: %s", name, err.reason) return logging.error("Failed to delete Ingress %r: %s", name, err.reason) raise await_no_resources_found( ExtensionsV1beta1Api().list_namespaced_ingress, namespace=namespace ) @retry( stop=stop_after_delay(KUBE_API_DELETE_TIMEOUT), wait=wait_fixed(KUBE_API_WAIT), retry=retry_if_exception_type(ResourceStillThereError), ) def await_no_resources_found(list_resources: Callable, **kwargs): try: found = list_resources(**kwargs) except ApiException as err: if err.status == STATUS_NOT_FOUND: return raise if hasattr(found, "items"): found = found.items if found: raise ResourceStillThereError(f"Resource(s): {found} still found; retrying.") @retry( stop=stop_after_delay(KUBE_API_LIST_TIMEOUT), wait=wait_fixed(KUBE_API_WAIT), retry=retry_if_exception_type(PodNotReadyError), ) def wait_until_pod_ready(deployment: Manifest) -> None: pod_ready = _pod_status(deployment).ready if not pod_ready: raise PodNotReadyError() @retry(stop=stop_after_delay(KUBE_API_LIST_TIMEOUT), wait=wait_fixed(KUBE_API_WAIT)) def _pod_status(deployment: Manifest) -> V1ContainerStatus: pod_list = _list_pod(deployment.namespace, deployment.labels) return pod_list[0].status.container_statuses[0] def _list_pod(namespace: str, labels: str) -> List[V1Pod]: logging.debug("Listing Pod(s) in Namespace %r with Labels %r...", namespace, labels) try: return ( CoreV1Api() .list_namespaced_pod(namespace=namespace, label_selector=labels) .items ) except ApiException as err: logging.error( "Failed to list Pod(s) in Namespace %r with Labels %r: %s", namespace, labels, err.reason, ) raise PK!v zelt/kubernetes/deployer.pyimport logging import os from tenacity import RetryError import zelt.kubernetes.client as kube from zelt.kubernetes.manifest_set import ManifestSet from zelt.kubernetes.storage.protocol import LocustfileStorage def create_resources( ms: ManifestSet, storage: LocustfileStorage, locustfile: os.PathLike ) -> None: try: kube.read_config() kube.create_namespace(ms.namespace) storage.upload(locustfile) kube.create_deployment(ms.controller) kube.wait_until_pod_ready(ms.controller) if ms.worker: kube.create_deployment(ms.worker) kube.create_service(ms.service) kube.create_ingress(ms.ingress) except (kube.ApiException, RetryError) as err: logging.error("Kubernetes operation failed: %s", err.reason) def delete_resources(ms: ManifestSet, storage: LocustfileStorage) -> None: logging.info("Deleting resources...") try: kube.read_config() namespace = ms.namespace.name storage.delete() kube.delete_ingress(ms.ingress.name, namespace) kube.delete_service(ms.service.name, namespace) kube.delete_deployments(namespace) kube.delete_namespace(namespace) except (kube.ApiException, RetryError) as err: logging.error("Kubernetes operation failed: %s", err.reason) def update_worker_pods(ms: ManifestSet, worker_replicas: int) -> None: if ms.worker is not None: ms.worker.body["spec"]["replicas"] = worker_replicas def rescale_worker_deployment(ms: ManifestSet, replicas: int) -> None: if not ms.worker: logging.error( "Missing worker manifest. Only worker deployments can be rescaled." ) return try: kube.read_config() kube.rescale_deployment(ms.worker, replicas) except kube.ApiException as err: logging.error("Kubernetes operation failed: %s", err.reason) PK!Caazelt/kubernetes/manifest.pyimport logging import os from enum import Enum from pathlib import Path from typing import NamedTuple, List, Dict import yaml class ManifestsNotFoundException(Exception): pass class ResourceType(Enum): INGRESS = "Ingress" SERVICE = "Service" NAMESPACE = "Namespace" DEPLOYMENT = "Deployment" OTHER = ... class DeploymentRole(Enum): CONTROLLER = "controller" WORKER = "worker" OTHER = ... class Manifest(NamedTuple): body: dict def metadata(self, key: str): """ Returns the value of metadata *key*. :raise ValueError: If *key* is not a metadata of this manifest. """ try: return self.body["metadata"][key] except KeyError: raise ValueError(f"no metadata {key!r} in manifest") from None def nonempty_string_metadata(self, key: str) -> str: """ Returns the non-empty string corresponding to metadata *key*. :raise ValueError: If *key* is not a metadata of this manifest, or if its value is not a non-empty string. """ v = self.metadata(key) _assert_metadata_type(v, str, key) if not v: raise ValueError(f"unexpectedly empty string for {key!r} metadata") return v @property def kind(self) -> ResourceType: try: value = self.body["kind"] except KeyError: raise ValueError("no kind specified in manifest") from None try: return ResourceType(value.capitalize()) except ValueError: logging.warning( "Unsupported resource type %r converted into %s.", value, ResourceType.OTHER, ) return ResourceType.OTHER @property def name(self) -> str: return self.nonempty_string_metadata("name") @property def namespace(self) -> str: if self.kind is ResourceType.NAMESPACE: return self.name return self.nonempty_string_metadata("namespace") @property def labels(self) -> str: key = "labels" labels = self.metadata(key) _assert_metadata_type(labels, dict, key) return ",".join(["{}={}".format(k, v) for k, v in labels.items()]) @property def labels_dict(self) -> Dict[str, str]: return dict(self.body.get("metadata", {}).get("labels", {})) @property def role(self) -> DeploymentRole: role = self.labels_dict.get("role", "") try: return DeploymentRole(role.strip().lower()) except ValueError: return DeploymentRole.OTHER @property def host(self) -> str: pretty_key = "spec.rules[0].host" try: host = self.body["spec"]["rules"][0]["host"] except (KeyError, IndexError): raise ValueError(f"no {pretty_key} in manifest") _assert_type_as(host, str, f"as {pretty_key} in manifest") return host @classmethod def from_file(cls, file: Path) -> "Manifest": """ :raise ValueError: If no manifest can be read from *file*. """ try: body = yaml.safe_load(file.read_text()) except OSError as err: raise ValueError(f"can't read manifest from file {file}") from err except yaml.YAMLError as err: raise ValueError(f"can't read manifest from non-YAML file {file}") from err _assert_type_as(body, dict, f"as top-level manifest object in file {file}") return Manifest(body=body) @classmethod def all_from_directory(cls, manifests_path: os.PathLike) -> List["Manifest"]: manifests = [] for path in Path(manifests_path).glob("*"): if path.is_file(): try: manifests.append(Manifest.from_file(path)) except ValueError as err: logging.warning("Ignoring %s: %s.", path, err) if not manifests: raise ManifestsNotFoundException( f"Could not load any manifest files from {manifests_path}" ) return manifests def _assert_type_as(value, t: type, as_msg: str) -> None: if not isinstance(value, t): raise ValueError(f"expected a {t.__qualname__} but got {value!r} {as_msg}") def _assert_metadata_type(value, t: type, key: str) -> None: _assert_type_as(value, t, f"as {key!r} metadata") PK!u/ zelt/kubernetes/manifest_set.pyfrom collections import defaultdict from os import PathLike from typing import NamedTuple, List, Dict, Optional from zelt.kubernetes.manifest import Manifest, ResourceType, DeploymentRole class ManifestSet(NamedTuple): """ Result of categorizing the user-provided Kubernetes manifests. """ namespace: Manifest service: Manifest ingress: Manifest controller: Manifest worker: Optional[Manifest] # TOOD: Refactor. def from_directory(dir_path: PathLike) -> ManifestSet: categories: Dict[ResourceType, List[Manifest]] = defaultdict(list) for m in Manifest.all_from_directory(dir_path): categories[m.kind].append(m) # Sanity checks for manifests that must be given exactly once. unique_resources = ( ResourceType.NAMESPACE, ResourceType.SERVICE, ResourceType.INGRESS, ) for kind in unique_resources: wanted = f"resource of kind {kind.value!r}" try: manifests = categories[kind] except KeyError: raise ValueError(f"Missing required {wanted}.") from None if len(manifests) != 1: raise ValueError(f"Expected exactly one {wanted} but got {len(manifests)}.") # Sanity checks for deployment manifests. deploy_rk = f"resource of kind {ResourceType.DEPLOYMENT.value!r}" try: deployments = categories[ResourceType.DEPLOYMENT] except KeyError: raise ValueError(f"Missing required {deploy_rk}.") from None if len(deployments) > 1: actual_roles = {d.role for d in deployments} expected_roles = {DeploymentRole.CONTROLLER, DeploymentRole.WORKER} if not expected_roles.issubset(actual_roles): raise ValueError( "Distributed Locust deployments must have roles " f"covering {[r.value for r in expected_roles]}, " f"got only {[r.value for r in actual_roles]}." ) controllers = [d for d in deployments if d.role is DeploymentRole.CONTROLLER] if len(controllers) != 1: raise ValueError( "Expected exactly one deployment with role " f"{DeploymentRole.CONTROLLER.value!r}." ) workers = [d for d in deployments if d.role is DeploymentRole.WORKER] if len(workers) > 1: raise ValueError( f"Expected at most one deployment with role {DeploymentRole.WORKER.value!r}." ) return ManifestSet( namespace=categories[ResourceType.NAMESPACE][0], service=categories[ResourceType.SERVICE][0], ingress=categories[ResourceType.INGRESS][0], controller=controllers[0], worker=workers[0] if workers else None, ) PK!#zelt/kubernetes/storage/__init__.pyPK!%g&$zelt/kubernetes/storage/configmap.pyimport logging import os from pathlib import Path from kubernetes.client import V1ConfigMap, CoreV1Api, V1DeleteOptions from kubernetes.client.rest import ApiException import zelt.kubernetes.client as client from zelt.kubernetes.storage.protocol import LocustfileStorage CONFIGMAP_NAME = "zelt-locustfile" CONFIGMAP_KEY = "locustfile.py" class ConfigmapStorage(LocustfileStorage): def __init__(self, namespace: str, labels: dict) -> None: super().__init__() self.namespace = namespace self.labels = dict(labels) def upload(self, locustfile: os.PathLike) -> None: logging.info("Creating ConfigMap %r...", CONFIGMAP_NAME) config_map = V1ConfigMap( data={CONFIGMAP_KEY: Path(locustfile).read_text()}, metadata={"name": CONFIGMAP_NAME, "labels": self.labels}, ) try: logging.debug("Creating ConfigMap %r...", CONFIGMAP_NAME) CoreV1Api().create_namespaced_config_map( namespace=self.namespace, body=config_map ) logging.debug("ConfigMap %r created.", CONFIGMAP_NAME) except ApiException as err: logging.error( "Failed to create ConfigMap %r: %s", CONFIGMAP_NAME, err.reason ) raise def delete(self) -> None: try: logging.info("Deleting ConfigMap %r...", CONFIGMAP_NAME) CoreV1Api().delete_namespaced_config_map( name=CONFIGMAP_NAME, namespace=self.namespace, body=V1DeleteOptions(propagation_policy="Foreground"), ) logging.debug("Waiting for ConfigMap %r to be deleted...", CONFIGMAP_NAME) client.await_no_resources_found( CoreV1Api().list_namespaced_config_map, namespace=self.namespace ) logging.debug("ConfigMap %r deleted.", CONFIGMAP_NAME) except ApiException as err: if err.status == 404: logging.debug( "Skipping ConfigMap %r deletion: %s", CONFIGMAP_NAME, err.reason ) return logging.error( "Failed to delete ConfigMap %r: %s", CONFIGMAP_NAME, err.reason ) raise PK!}#zelt/kubernetes/storage/protocol.pyimport os class LocustfileStorage: def upload(self, locustfile: os.PathLike) -> None: raise NotImplementedError() def delete(self) -> None: raise NotImplementedError() PK!cнzelt/kubernetes/storage/s3.pyimport logging import os try: import boto3 except ImportError as err: raise ImportError( "boto3 not found. It is required for uploading to S3 with the " "'--storage=s3' option. " "It can be installed with 'pip install boto3'." ) from err from zelt.kubernetes.storage.protocol import LocustfileStorage class S3Storage(LocustfileStorage): def __init__(self, bucket: str, key: str) -> None: super().__init__() self.bucket = bucket self.key = key def upload(self, locustfile: os.PathLike) -> None: def _upload_callback(nb_bytes_transferred: int) -> None: logging.info( "Uploading %s to %s as %s: %s bytes transferred.", locustfile, self.bucket, self.key, nb_bytes_transferred, ) boto3.resource("s3").Object(self.bucket, self.key).upload_file( Filename=os.fspath(locustfile), Callback=_upload_callback ) def delete(self) -> None: logging.info("Deleting %s from S3 bucket %s...", self.key, self.bucket) boto3.resource("s3").Object(self.bucket, self.key).delete() PK!>sxx zelt/zelt.pyimport enum import logging import os import subprocess from pathlib import Path from time import time from typing import Optional, Sequence from zlib import adler32 from zelt.kubernetes import deployer, manifest_set from zelt.kubernetes.manifest_set import ManifestSet from zelt.kubernetes.storage.configmap import ConfigmapStorage from zelt.kubernetes.storage.protocol import LocustfileStorage from zelt.kubernetes.storage.s3 import S3Storage try: import transformer TRANSFORMER_NOT_FOUND = False except ImportError: TRANSFORMER_NOT_FOUND = True class HARFilesNotFoundException(Exception): pass class StorageMethod(enum.Enum): CONFIGMAP = enum.auto() S3 = enum.auto() @classmethod def from_storage_arg(cls, arg: str) -> "StorageMethod": if arg.lower() == "s3": return cls.S3 if arg.lower() in ("cm", "configmap"): return cls.CONFIGMAP raise ValueError(f"unknown {cls.__qualname__} {arg!r}") def build_storage( self, manifests: ManifestSet, s3_bucket: Optional[str] = None, s3_key: Optional[str] = None, ) -> LocustfileStorage: if self is StorageMethod.S3 and (not (s3_bucket and s3_key)): raise ValueError( "Missing required 's3-bucket' and/or 's3-key' options " "for 'storage=s3' option." ) if self is not StorageMethod.S3 and (s3_bucket or s3_key): raise ValueError( "Unexpected 's3-bucket' or 's3-key' options " "without 'storage=s3' option." ) if self is StorageMethod.S3: return S3Storage(bucket=s3_bucket, key=s3_key) return ConfigmapStorage( namespace=manifests.namespace.name, labels=manifests.namespace.labels_dict ) def deploy( locustfile: os.PathLike, worker_pods: int, manifests_path: Optional[os.PathLike], clean: bool, storage_method: StorageMethod, local: bool, s3_bucket: Optional[str] = None, s3_key: Optional[str] = None, ) -> None: if local: if manifests_path: logging.warning( "Mutually incompatible options 'local' and 'manifests' specified. Defaulting to running locally." ) return _deploy_locally(locustfile) if not manifests_path: raise ValueError("Missing required 'manifests' option.") _deploy_in_kubernetes( locustfile, worker_pods, manifests_path=manifests_path, clean_deployment=clean, storage_method=storage_method, s3_bucket=s3_bucket, s3_key=s3_key, ) def rescale(manifests_path, worker_pods: int) -> None: if not manifests_path: raise ValueError("Missing required 'manifests' option.") if worker_pods < 0: raise ValueError(f"Expected a positive number of pods, got {worker_pods}.") manifests = manifest_set.from_directory(manifests_path) deployer.update_worker_pods(manifests, worker_pods) deployer.rescale_worker_deployment(manifests, worker_pods) logging.info("Rescaling complete.") def delete( manifests_path: os.PathLike, storage_method: StorageMethod, s3_bucket: Optional[str] = None, s3_key: Optional[str] = None, ) -> None: if not manifests_path: raise ValueError("Missing required 'manifests' option.") manifests = manifest_set.from_directory(manifests_path) storage = storage_method.build_storage(manifests, s3_bucket, s3_key) deployer.delete_resources(manifests, storage) logging.info("Deletion complete.") def invoke_transformer( paths: Sequence[os.PathLike], plugin_names: Sequence[str] ) -> Path: if TRANSFORMER_NOT_FOUND: raise ImportError( "Transformer not found. It is required for calls to 'from-har'. " "It can be installed with 'pip install har-transformer'." ) har_files = [] for path in paths: if os.path.exists(path): har_files.append(Path(path)) if not har_files: raise HARFilesNotFoundException(f"Could not load any HAR files from {paths}") locustfile = Path(f"locustfile-{adler32(str(paths).encode(encoding='utf-8'))}.py") with locustfile.open("w") as f: transformer.dump(f, har_files, plugin_names) logging.info("%s created from %s.", locustfile, har_files) return locustfile def _deploy_locally(locustfile: os.PathLike) -> None: logging.info("Deploying Locust locally with locustfile %s...", locustfile) logging.info("\n\nOpen http://localhost:8089/ to access the Locust dashboard.\n\n") # The host value is unused when full URLs are used in the locustfile. subprocess.run(["locust", "-f", os.fspath(locustfile), "--host=unused"], check=True) def _deploy_in_kubernetes( locustfile: os.PathLike, worker_pods: int, manifests_path: os.PathLike, clean_deployment: bool, storage_method: StorageMethod, s3_bucket: Optional[str], s3_key: Optional[str], ) -> None: if worker_pods < 0: raise ValueError(f"Expected a positive number of pods, got {worker_pods}.") logging.info( "Deploying Locust in Kubernetes with locustfile %s and %s worker pods...", locustfile, worker_pods, ) manifests = manifest_set.from_directory(manifests_path) storage = storage_method.build_storage(manifests, s3_bucket, s3_key) if clean_deployment: deployer.delete_resources(manifests, storage) deployer.update_worker_pods(manifests, worker_pods) deployer.create_resources(manifests, storage, locustfile) logging.info( "\n\nOpen %s to access the Locust dashboard.\n\n", manifests.ingress.host ) PK!Ha#!%zelt-1.2.0.dist-info/entry_points.txtN+I/N.,()J)M̳JPK!d*++zelt-1.2.0.dist-info/LICENSEMIT License Copyright (c) 2019 Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. PK!HڽTUzelt-1.2.0.dist-info/WHEEL A н#Z;/"d&F[xzw@Zpy3Fv]\fi4WZ^EgM_-]#0(q7PK!Had4n#zelt-1.2.0.dist-info/METADATAX[s6}kkEʉtq<-&$3d,$$aEڕd,n&J-UgoD} 6믣7~0>z'~~rb  7wi<َYR3(RUd".gs(UIm}e=:z=#d?zG?o-J!TxO뷄|ehJ*3Q:ۅ7L"~ N*'=[Fo܎(n`V?<&3 NaqJ:n+\P<[&5 w Y%LSB6I7%L{bk;_$E?e/m԰|Y?\Bw=Z9,Ќr,H!NA,b#KSUtЌq)~8%5> gk稸*K,]SrQ،l_MsBZ&TJҤEa Lx'N|~asװL(^Cr-;K-v488gax޾<|<G`4{ܔ 2Txz@o*8 GsP6B=Y13 ԥQےi&jƊ$b˙#<3vȑ;fIh2A%()֛C򔷊B˙&&U`+_2gɗKD%3|xz|] 5OnF|Y9{5yր`׺O+O4Ȥiڽ^N< #)_n>OLD?+eUс!U˓54UjƜt {൅-Q#C-'3Jl˓ |hi챭CO1-z(4'fUd)ȅ1 bZ'uTl k UO 1}0|J/  tE- ݛnt}g@/t?_W"Jܸܳ$~ Z.Fū"CVV~AE؟ih  `=aɩv4lyT)JJ\o:Qo:Z((;vaZ8TGk.D,K`r[^ص};65V1/+" TS"KȺL-yW etYgnʾVRyyͱč ppr;\eɗg {WaƳ& BkW΁EREZfqi g3 txV֒;ac<7hI qT;v:,"!}ӹNJr fCFƠ8rbP2-0;µ Hm(@#RQy22O`r5ޘ![{|UyL p\ NϯF_v& }~H4Yp! y9P{PBs~ZE݉T5KCN4.DzXVtoG3_dd"݌MӌvZIiq"JRhFSi۽&8"8J?* xhsiP64ߊA ,u Ȑ6A?PK!HQ zelt-1.2.0.dist-info/RECORD@} â Ť l`% |}阘hzQ61|H`[vH\mxeyZfcJ_AaVtڢJZߎZҳ&*ceB>m k8i4O{sxx Rzelt/zelt.pyPK!Ha#!%Kizelt-1.2.0.dist-info/entry_points.txtPK!d*++izelt-1.2.0.dist-info/LICENSEPK!HڽTUnzelt-1.2.0.dist-info/WHEELPK!Had4n#nzelt-1.2.0.dist-info/METADATAPK!HQ vzelt-1.2.0.dist-info/RECORDPKy