# file: cowfish/__init__.py
"A useful asynchronous library that is built on top of aiobotocore"
__version__ = "0.2.7"


# file: cowfish/firehose.py
import time
import json
import logging
import asyncio
import aiobotocore
from .worker import BatchWorker

logger = logging.getLogger(__package__)


class Firehose:
    service_name = "firehose"
    MAX_RETRY = 10

    def __init__(
        self,
        stream_name: str,
        region_name: str,
        encode_func: "function" = None,
        delimiter: bytes = b"\n",
        *,
        worker_params: dict = None,
        client_params: dict = None,
        original_api: bool = False
    ):
        self.session = aiobotocore.get_session()
        self.stream_name = stream_name
        self.encode_func = encode_func or (lambda o: json.dumps(o).encode())
        self.delimiter = delimiter
        client_params = client_params or {}
        client_params["region_name"] = region_name
        self.client = self.session.create_client(self.service_name, **client_params)
        worker_params = worker_params or {}
        batch_func = self.original_batch if original_api else self.write_batch
        self.worker = BatchWorker(batch_func, **worker_params)

    def __repr__(self):
        return "<{}: stream={}, worker={!r}>".format(
            self.__class__.__name__, self.stream_name, self.worker
        )

    async def put(self, obj):
        return await self.worker.put(obj)

    async def stop(self):
        timestamp = time.time()
        await self.worker.stop()
        await self.client.close()
        cost = time.time() - timestamp
        logger.info("{0!r} stopped in {1:.1f} seconds".format(self, cost))

    def _encode(self, obj_list: list) -> bytes:
        # join the whole batch into one delimiter-terminated blob
        encoded = [self.encode_func(obj) for obj in obj_list]
        encoded.append(b"")
        return self.delimiter.join(encoded)

    async def write_batch(self, obj_list: list):
        # send the whole batch as a single Firehose record; on failure the
        # objects are returned so the BatchWorker requeues them
        try:
            data = self._encode(obj_list)
            await self.client.put_record(
                DeliveryStreamName=self.stream_name, Record={"Data": data}
            )
        except Exception as e:
            logger.exception(e)
            return obj_list

    async def original_batch(self, obj_list: list, _seq: int = 0):
        if _seq > 0:
            await asyncio.sleep(0.1 * (2 ** _seq))
        if _seq > self.MAX_RETRY:
            raise Exception("original_batch error: firehose put_record_batch failed")
        try:
            resp = await self.client.put_record_batch(
                DeliveryStreamName=self.stream_name,
                Records=[
                    {"Data": self.encode_func(obj) + self.delimiter}
                    for obj in obj_list
                ],
            )
        except Exception as e:
            logger.exception(e)
            return obj_list
        if resp["FailedPutCount"] > 0:
            failed_obj_list = [
                obj_list[i]
                for i, record in enumerate(resp["RequestResponses"])
                if "ErrorCode" in record
            ]
            # bug fix: the shipped code retried via self.write_batch, which
            # takes no _seq argument and would raise TypeError
            return await self.original_batch(failed_obj_list, _seq=_seq + 1)
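
A minimal usage sketch for Firehose follows (not part of the package). The stream name and region are placeholders; it assumes the delivery stream already exists, credentials are available in the environment, and the aiobotocore 0.x API this release targets.

# usage sketch (not part of the package); stream name and region are
# placeholders, and the delivery stream is assumed to exist already
import asyncio
from cowfish.firehose import Firehose

async def main():
    firehose = Firehose("my-delivery-stream", "us-east-1")
    for i in range(100):
        # put() only enqueues; the BatchWorker aggregates in the background
        # (up to 10 objects or 5 seconds per flush by default)
        await firehose.put({"event": "click", "seq": i})
    await firehose.stop()  # drain the queue, then close the client

asyncio.get_event_loop().run_until_complete(main())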
# file: cowfish/kinesis.py
import os
import time
import json
import base64
import asyncio
import logging
import aiobotocore
from .worker import BatchWorker

try:
    import msgpack
except ImportError:
    pass

logger = logging.getLogger(__package__)


class Kinesis:
    service_name = "kinesis"
    MAX_RETRY = 10

    def __init__(
        self,
        stream_name: str,
        region_name: str,
        encode_func: "function" = None,
        key_func: "function" = None,
        *,
        worker_params: dict = None,
        client_params: dict = None
    ):
        self.session = aiobotocore.get_session()
        self.stream_name = stream_name
        self.encode_func = encode_func or (lambda o: json.dumps(o).encode())
        self.key_func = key_func
        client_params = client_params or {}
        client_params["region_name"] = region_name
        self.client = self.session.create_client(self.service_name, **client_params)
        worker_params = worker_params or {}
        self.worker = BatchWorker(self.write_batch, **worker_params)

    def __repr__(self):
        return "<{}: stream={}, worker={!r}>".format(
            self.__class__.__name__, self.stream_name, self.worker
        )

    async def stop(self):
        timestamp = time.time()
        await self.worker.stop()
        await self.client.close()
        cost = time.time() - timestamp
        logger.info("{0!r} stopped in {1:.1f} seconds".format(self, cost))

    def _get_key(self, obj) -> str:
        # PartitionKey must be a string (the original annotation said bytes);
        # fall back to a random key so records spread across shards
        if self.key_func is None:
            return base64.b64encode(os.urandom(24)).decode("ascii")
        return self.key_func(obj)

    def _encode(self, obj):
        return self.encode_func(obj)

    async def write_batch(self, obj_list: list, _seq: int = 0):
        if _seq > 0:
            await asyncio.sleep(0.1 * (2 ** _seq))
        if _seq > self.MAX_RETRY:
            raise Exception("write_batch error: kinesis put_records failed", obj_list)
        try:
            resp = await self.client.put_records(
                StreamName=self.stream_name,
                Records=[
                    {"PartitionKey": self._get_key(obj), "Data": self._encode(obj)}
                    for obj in obj_list
                ],
            )
        except Exception as e:
            logger.exception(e)
            return obj_list
        if resp["FailedRecordCount"] > 0:
            failed_obj_list = [
                obj_list[i]
                for i, record in enumerate(resp["Records"])
                if "ErrorCode" in record
            ]
            return await self.write_batch(failed_obj_list, _seq=_seq + 1)

    async def write_one(self, obj, queued: bool = False, _seq: int = 0):
        if _seq > 0:
            await asyncio.sleep(0.1 * (2 ** _seq))
        if _seq > self.MAX_RETRY - 1:
            raise Exception("write_one error: kinesis put_record failed")
        if queued:
            await self.worker.put(obj)
            return
        try:
            return await self.client.put_record(
                StreamName=self.stream_name,
                Data=self._encode(obj),
                PartitionKey=self._get_key(obj),
            )
        except Exception as e:
            logger.exception(e)
            return await self.write_one(obj, queued=False, _seq=_seq + 1)


class CompactKinesis(Kinesis):
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.buffer = bytearray()
        try:
            self.packer = msgpack.Packer()
        except NameError:
            raise ImportError("need msgpack")
        self.bufmax = 1024 * 25

    def _encode(self, obj):
        # payloads are already packed bytes; pass them through untouched
        return obj

    async def write(self, obj, flush: bool = False):
        packed = self.packer.pack(obj)
        if len(self.buffer) + len(packed) >= self.bufmax:
            payload = bytes(self.buffer)
            self.buffer.clear()
            self.buffer.extend(packed)
            await self.write_one(payload, queued=True)
        else:
            self.buffer.extend(packed)
        if flush and self.buffer:
            payload = bytes(self.buffer)
            self.buffer.clear()
            await self.write_one(payload, queued=True)

    async def flush(self):
        if self.buffer:
            payload = bytes(self.buffer)
            self.buffer.clear()
            await self.write_one(payload, queued=True)

    async def write_fluent(self, label: str, data: str):
        timestamp = int(time.time())
        packet = (label, timestamp, data)
        await self.write(packet)

    async def stop(self):
        if self.buffer:
            payload = bytes(self.buffer)
            self.buffer.clear()
            await self.write_one(payload, queued=True)
        await super().stop()
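
A sketch contrasting the two writers (not part of the package): Kinesis ships one record per object, while CompactKinesis msgpack-packs objects into roughly 25 KiB blobs before sending. Stream names are placeholders and the compact variant requires msgpack.

# usage sketch (not part of the package); requires an existing stream,
# and CompactKinesis additionally requires msgpack
import asyncio
from cowfish.kinesis import Kinesis, CompactKinesis

async def main():
    kinesis = Kinesis("my-stream", "us-east-1")
    await kinesis.write_one({"event": "signup"}, queued=True)  # via the worker
    await kinesis.stop()

    compact = CompactKinesis("my-stream", "us-east-1")
    for i in range(1000):
        await compact.write({"seq": i})  # buffered until ~25 KiB accumulates
    await compact.stop()  # flushes whatever is left in the buffer

asyncio.get_event_loop().run_until_complete(main())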
# file: cowfish/pool.py
import asyncio
import collections


class Pool:
    def __init__(self, factory: "function", minsize: int = 1, maxsize: int = 10):
        self.factory = factory
        self.minsize = minsize
        self.maxsize = maxsize
        self._pool = collections.deque(maxlen=maxsize)
        self._cond = asyncio.Condition(asyncio.Lock())
        self._using = set()
        self._acquiring = 0
        self._close_state = asyncio.Event()
        self._fill_free(override_min=False)

    def __repr__(self):
        return "<{}: size={}>".format(self.__class__.__name__, self.size)

    @property
    def size(self) -> int:
        return self.freesize + len(self._using) + self._acquiring

    @property
    def freesize(self) -> int:
        return len(self._pool)

    async def clear(self):
        async with self._cond:
            while self._pool:
                client = self._pool.popleft()
                await client.close()

    async def _do_close(self):
        async with self._cond:
            assert self._acquiring == 0, self._acquiring
            while self._pool:
                client = self._pool.popleft()
                await client.close()
            for client in self._using:
                await client.close()
            self._using.clear()

    async def close(self):
        if not self._close_state.is_set():
            self._close_state.set()
            await self._do_close()

    @property
    def closed(self):
        return self._close_state.is_set()

    async def acquire(self):
        async with self._cond:
            while True:
                self._fill_free(override_min=True)
                if self.freesize:
                    client = self._pool.popleft()
                    assert client not in self._using, (client, self._using)
                    self._using.add(client)
                    return client
                else:
                    await self._cond.wait()

    async def release(self, client):
        assert client in self._using
        self._using.remove(client)
        self._pool.append(client)
        await self._wakeup()

    async def _wakeup(self):
        async with self._cond:
            self._cond.notify()

    def _fill_free(self, *, override_min: bool):
        while self.size < self.minsize:
            self._acquiring += 1
            try:
                client = self.factory()
                self._pool.append(client)
            finally:
                self._acquiring -= 1
        if self.freesize:
            return
        if override_min:
            while not self._pool and self.size < self.maxsize:
                self._acquiring += 1
                try:
                    client = self.factory()
                    self._pool.append(client)
                finally:
                    self._acquiring -= 1

    async def auto_release(self, client, coro):
        try:
            return await coro
        finally:
            await self.release(client)

    def get(self):
        return AsyncClientContextManager(self)


class AsyncClientContextManager:
    __slots__ = ("_pool", "_client")

    def __init__(self, pool):
        self._pool = pool
        self._client = None

    async def __aenter__(self):
        self._client = await self._pool.acquire()
        return self._client

    async def __aexit__(self, exc_type, exc_value, tb):
        try:
            await self._pool.release(self._client)
        finally:
            self._pool = None
            self._client = None
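
A small, self-contained Pool sketch (not part of the package). FakeClient stands in for a real factory; any zero-argument callable returning an object with an awaitable close() works.

# usage sketch (not part of the package); FakeClient is a stand-in factory
import asyncio
from cowfish.pool import Pool

class FakeClient:
    async def close(self):
        pass

async def main():
    pool = Pool(FakeClient, minsize=1, maxsize=4)
    async with pool.get() as client:  # acquire/release via context manager
        print("got", client)
    client = await pool.acquire()     # or manage the lifetime by hand
    try:
        print("reused", client)
    finally:
        await pool.release(client)
    await pool.close()

asyncio.get_event_loop().run_until_complete(main())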
message["params"]["MessageGroupId"] = group_id if queued: await self.worker.put(message) return try: return await self.client.send_message( QueueUrl=(await self._get_queue_url()), MessageBody=self._encode(record), DelaySeconds=delay_seconds, MessageAttributes=attributes, **message["params"] ) except Exception as e: logger.exception(e) return await self.write_one( record, delay_seconds=delay_seconds, deduplication_id=deduplication_id, group_id=group_id, queued=False, _seq=_seq + 1, **attributes ) async def write_batch(self, obj_list, _seq=0): if _seq > 0: await asyncio.sleep(0.1 * (2 ** _seq)) if _seq > self.MAX_RETRY - 1: raise Exception( "write_batch error: SQS send_message_batch failed", obj_list ) Entries = [ { "Id": str(i), "MessageBody": self._encode(message["record"]), "DelaySeconds": message["delay_seconds"], "MessageAttributes": message["attributes"], **message["params"], } for i, message in enumerate(obj_list) ] try: resp = await self.client.send_message_batch( QueueUrl=(await self._get_queue_url()), Entries=Entries ) except Exception as e: logger.exception(e) return obj_list if "Failed" in resp: logger.error("Send failed: {}, {}".format(obj_list, resp["Failed"])) failed_obj_list = [ obj_list[int(d["Id"])] for d in resp["Failed"] if not d["SenderFault"] ] return await self.write_batch(failed_obj_list, _seq=_seq + 1) def async_rpc( self, func: "function" = None, *, delay_seconds: int = 0, deduplication_id=None, group_id=None, queued: bool = True, **attributes ): if func is None: return functools.partial( self.async_rpc, delay_seconds=delay_seconds, deduplication_id=deduplication_id, group_id=group_id, queued=queued, **attributes ) if type(func) == str: async def async_func(*args, **kw): record = {"fpath": func, "args": args, "kw": kw} await self.write_one( record, delay_seconds=delay_seconds, deduplication_id=deduplication_id, group_id=group_id, queued=queued, **attributes ) else: @functools.wraps(func) async def async_func(*args, **kw): fpath = func.__module__ + "." + func.__name__ record = {"fpath": fpath, "args": args, "kw": kw} await self.write_one( record, delay_seconds=delay_seconds, deduplication_id=deduplication_id, group_id=group_id, queued=queued, **attributes ) return async_func class StringAttribute(dict): def __init__(self, value): self["DataType"] = "String" self["StringValue"] = str(value) class BinaryAttribute(dict): def __init__(self, value): self["DataType"] = "Binary" self["BinaryValue"] = bytes(value) class NumberAttribute(dict): def __init__(self, value): self["DataType"] = "Number" self["StringValue"] = str(value) PK L#- - cowfish/sqsprocesser.pyimport sys import base64 import pickle import signal import inspect import logging import asyncio import argparse import importlib import aiobotocore from . import utils from .worker import BatchWorker from . 
# file: cowfish/sqsprocesser.py
import sys
import base64
import pickle
import signal
import inspect
import logging
import asyncio
import argparse
import importlib
import aiobotocore
from . import utils
from .worker import BatchWorker
from . import __version__

logger = logging.getLogger(__package__)
__description__ = "An AWS SQS processer using asyncio/aiobotocore"


class SQSRetry(Exception):
    def __init__(self, *, max_times: int, after: int = None):
        self.max_times = max_times
        self.seconds_later = after

    def delay_seconds(self, retry_times):
        if self.seconds_later is None:
            return 1 + 2 ** retry_times
        return self.seconds_later


class Message(dict):
    @property
    def message_id(self):
        return self["MessageId"]

    @property
    def body(self):
        if not hasattr(self, "_body"):
            Body = self["Body"]
            self._body = pickle.loads(base64.b64decode(Body))
        return self._body

    @property
    def attributes(self):
        return self.get("Attributes", {})


class SQSProcesser:
    service_name = "sqs"
    MAX_RETRY = 10

    def __init__(
        self,
        queue_name: str,
        region_name: str,
        message_handler: "callable or coroutine",
        *,
        concurrency: int = 10,
        visibility_timeout: int = 60,
        idle_sleep: int = 0,
        batch_ops: bool = True,
        client_params: dict = None,
        delete_worker_params: dict = None,
        change_worker_params: dict = None,
        loop=None
    ):
        self.queue_name = queue_name
        self.concurrency = concurrency
        self.message_handler = message_handler
        self.visibility_timeout = visibility_timeout
        self.idle_sleep = idle_sleep
        self.QueueUrl = None
        self.hooks = {"after_server_stop": set()}
        client_params = client_params or {}
        client_params["region_name"] = region_name
        self.lock = asyncio.Lock()
        self.session = aiobotocore.get_session()
        self.loop = loop or asyncio.get_event_loop()
        self.quit_event = asyncio.Event()
        self.loop.add_signal_handler(signal.SIGINT, self.quit_event.set)
        self.loop.add_signal_handler(signal.SIGTERM, self.quit_event.set)
        self.semaphore = asyncio.Semaphore(concurrency)
        self.futures = set()
        self.client = self.session.create_client(self.service_name, **client_params)
        if batch_ops:
            delete_worker_params = delete_worker_params or {}
            self.delete_worker = BatchWorker(self.delete_batch, **delete_worker_params)
            change_worker_params = change_worker_params or {}
            self.change_worker = BatchWorker(self.change_batch, **change_worker_params)
        else:
            self.delete_worker = None
            self.change_worker = None

    def __repr__(self):
        return "<{}: queue={}, client={}, concurrency={}, working={}>".format(
            self.__class__.__name__,
            self.queue_name,
            self.client,
            self.concurrency,
            len(self.futures),
        )

    async def _get_queue_url(self) -> str:
        if self.QueueUrl is None:
            async with self.lock:
                if self.QueueUrl is None:
                    resp = await self.client.get_queue_url(QueueName=self.queue_name)
                    self.QueueUrl = resp["QueueUrl"]
        return self.QueueUrl

    async def run_forever(self):
        while not self.quit_event.is_set():
            try:
                await self._fetch_messages()
            except Exception as e:
                logger.exception(e)
                continue
        await self.close()

    async def close(self):
        if self.futures:
            await asyncio.wait(self.futures)
        if self.change_worker:
            await self.change_worker.stop()
        if self.delete_worker:
            await self.delete_worker.stop()
        await self.client.close()

    async def _fetch_messages(self):
        job = self.client.receive_message(
            QueueUrl=(await self._get_queue_url()),
            AttributeNames=["ApproximateReceiveCount"],
            MessageAttributeNames=["All"],
            MaxNumberOfMessages=10,
            VisibilityTimeout=self.visibility_timeout,
            WaitTimeSeconds=20,
        )
        response = await utils.cancel_on_event(job, self.quit_event)
        if self.quit_event.is_set():
            return
        if "Messages" not in response and self.idle_sleep > 0:
            await asyncio.sleep(self.idle_sleep)
        if "Messages" in response:
            for message_dict in response["Messages"]:
                await self.semaphore.acquire()
                fut = asyncio.ensure_future(self.handle(Message(message_dict)))
                self.futures.add(fut)
                fut.add_done_callback(self.futures.remove)

    async def handle(self, message):
        try:
            delete = True
            try:
                result = self.message_handler(message)
                if inspect.isawaitable(result):
                    await result
            except Exception as e:
                # compare by class name so SQSRetry raised from a reloaded
                # module is still recognized
                if e.__class__.__name__ == SQSRetry.__name__:
                    receive_count = int(
                        message.attributes.get("ApproximateReceiveCount", 1)
                    )
                    if e.max_times > receive_count:
                        seconds = e.delay_seconds(receive_count)
                        await self.change_one(message, seconds)
                        delete = False
                else:
                    logger.exception(e)
            finally:
                if delete:
                    await self.delete_one(message)
        finally:
            self.semaphore.release()

    async def change_batch(self, messages, _seq: int = 0):
        if _seq > 0:
            await asyncio.sleep(0.1 * (2 ** _seq))
        if _seq > self.MAX_RETRY:
            raise Exception("change_message_visibility_batch failed", messages)
        try:
            resp = await self.client.change_message_visibility_batch(
                QueueUrl=(await self._get_queue_url()),
                Entries=[
                    {
                        "Id": str(index),
                        "ReceiptHandle": message["ReceiptHandle"],
                        "VisibilityTimeout": timeout,
                    }
                    for index, (message, timeout) in enumerate(messages)
                ],
            )
        except Exception as e:
            logger.exception(e)
            return messages
        if "Failed" in resp:
            logger.error("Change failed: {} {}".format(messages, resp["Failed"]))
            server_failed_messages = [messages[int(d["Id"])] for d in resp["Failed"]]
            return await self.change_batch(server_failed_messages, _seq + 1)

    async def change_one(self, message, visibility_timeout: int):
        if self.change_worker:
            await self.change_worker.put((message, visibility_timeout))
            return
        return await self.client.change_message_visibility(
            QueueUrl=(await self._get_queue_url()),
            ReceiptHandle=message["ReceiptHandle"],
            VisibilityTimeout=visibility_timeout,
        )

    async def delete_batch(self, messages, _seq: int = 0):
        if _seq > 0:
            await asyncio.sleep(0.1 * (2 ** _seq))
        if _seq > self.MAX_RETRY:
            raise Exception("delete_message_batch failed", messages)
        try:
            resp = await self.client.delete_message_batch(
                QueueUrl=(await self._get_queue_url()),
                Entries=[
                    {"Id": str(index), "ReceiptHandle": message["ReceiptHandle"]}
                    for index, message in enumerate(messages)
                ],
            )
        except Exception as e:
            logger.exception(e)
            return messages
        if "Failed" in resp:
            logger.error("Delete failed: {}, {}".format(messages, resp["Failed"]))
            server_failed_messages = [
                messages[int(d["Id"])] for d in resp["Failed"] if not d["SenderFault"]
            ]
            return await self.delete_batch(server_failed_messages, _seq + 1)

    async def delete_one(self, message):
        if self.delete_worker:
            await self.delete_worker.put(message)
            return
        return await self.client.delete_message(
            QueueUrl=(await self._get_queue_url()),
            ReceiptHandle=message["ReceiptHandle"],
        )

    def after_server_stop(self, func: "function"):
        self.hooks["after_server_stop"].add(func)

    def start(self):
        try:
            self.loop.run_until_complete(self._get_queue_url())
        except Exception:
            self.loop.run_until_complete(self.close())
            self.loop.run_until_complete(self.loop.shutdown_asyncgens())
            self.loop.close()
            print(
                "The queue {} doesn't exist, exiting...".format(self.queue_name),
                file=sys.stderr,
            )
            sys.exit(1)
        try:
            self.loop.run_until_complete(self.run_forever())
        finally:
            try:
                for func in self.hooks["after_server_stop"]:
                    if asyncio.iscoroutinefunction(func):
                        self.loop.run_until_complete(func(self.loop))
                    else:
                        func(self.loop)
            except Exception as e:
                logger.exception(e)
            self.loop.run_until_complete(self.loop.shutdown_asyncgens())
            self.loop.close()


def import_function(path: str) -> "function":
    module_path, func_name = path.rsplit(".", 1)
    module = importlib.import_module(module_path)
    return getattr(module, func_name)


async def plain_handler(message):
    print(message)


async def rpc_handler(message):
    record = message.body
    func = import_function(record["fpath"])
    func = getattr(func, "_real", func)
    result = func(*record["args"], **record["kw"])
    if inspect.isawaitable(result):
        result = await result
    log = "{0}:{1}({2},{3})={4}".format(
        message.message_id,
        func.__name__,
        ", ".join(["{0!r}".format(arg) for arg in record["args"]]),
        ", ".join(["{0}={1}".format(k, v) for k, v in record["kw"].items()]),
        result,
    )
    logger.info(log)


def main():
    epilog = "version info: cowfish/{} aiobotocore/{}".format(
        __version__, aiobotocore.__version__
    )
    parser = argparse.ArgumentParser(description=__description__, epilog=epilog)
    parser.add_argument("queue_name")
    parser.add_argument("region")
    parser.add_argument(
        "-c", "--concurrency", type=int, default=20, help="default to 20"
    )
    parser.add_argument(
        "-v", "--visibility-timeout", type=int, default=60, help="default to 60"
    )
    parser.add_argument("--handler", type=str, help="default to rpc_handler")
    parser.add_argument(
        "--no-batch",
        dest="batch",
        action="store_false",
        default=True,
        help="do not use batch ops",
    )
    args = parser.parse_args()
    if args.handler is None:
        handler = rpc_handler
    else:
        handler = import_function(args.handler)
    processer = SQSProcesser(
        args.queue_name,
        args.region,
        handler,
        concurrency=args.concurrency,
        visibility_timeout=args.visibility_timeout,
        batch_ops=args.batch,
    )
    processer.start()


if __name__ == "__main__":
    main()
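
The consumer side, sketched (not part of the package); the queue name and region are placeholders and the transient failure is simulated. The same loop is reachable from the shell as python -m cowfish.sqsprocesser <queue> <region> [--handler dotted.path].

# usage sketch (not part of the package)
import random
from cowfish.sqsprocesser import SQSProcesser, SQSRetry

def handle(message):
    payload = message.body     # unpickles the Body that SQSWriter encoded
    if random.random() < 0.1:  # stand-in for a transient failure
        # redeliver up to 5 times, 30 seconds apart; the message is only
        # deleted once the handler returns without raising SQSRetry
        raise SQSRetry(max_times=5, after=30)
    print("processed", payload)

processer = SQSProcesser("jobs", "us-east-1", handle, concurrency=10)
processer.start()  # blocks; SIGINT/SIGTERM drain in-flight work and exit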
# file: cowfish/utils.py
import asyncio


async def cancel_on_event(coro, event: asyncio.Event):
    """Run coro until it finishes or event is set, whichever comes first,
    cancelling the loser; returns coro's result, or None if the event won."""
    event_task = asyncio.ensure_future(event.wait())
    done, pending = await asyncio.wait(
        [event_task, coro], return_when=asyncio.FIRST_COMPLETED
    )
    if pending:
        pending.pop().cancel()
    while done:
        task = done.pop()
        if task is not event_task:
            return task.result()


def format_params(params: dict) -> str:
    return ", ".join("{}={}".format(k, v) for k, v in params.items())


class ClientMethodProxy:
    def __init__(self, pool, name: str):
        self._pool = pool
        self.name = name

    async def __call__(self, *args, **kw):
        client = await self._pool.acquire()
        try:
            return await getattr(client, self.name)(*args, **kw)
        finally:
            try:
                await self._pool.release(client)
            finally:
                self._pool = None
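
A self-contained sketch of cancel_on_event (not part of the package): the event fires first, so the pending job is cancelled and the helper returns None. The job is wrapped in a Task up front, which asyncio.wait accepts on all Python versions.

# usage sketch (not part of the package)
import asyncio
from cowfish.utils import cancel_on_event

async def main():
    quit_event = asyncio.Event()
    asyncio.get_event_loop().call_later(0.1, quit_event.set)
    job = asyncio.ensure_future(asyncio.sleep(10, result="done"))
    result = await cancel_on_event(job, quit_event)
    print(result)  # None: the event won the race and the job was cancelled

asyncio.get_event_loop().run_until_complete(main())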
# file: cowfish/worker.py
import time
import logging
import asyncio
import async_timeout

logger = logging.getLogger(__package__)


class BatchWorker:
    """
    BatchWorker holds some amount of objects for a few seconds
    before sending them out.
    """

    def __init__(
        self,
        handler,
        *,
        maxsize: int = 0,
        aggr_num: int = 10,
        timeout: int = 5,
        concurrency: int = 10
    ):
        self.queue = asyncio.Queue(maxsize)
        self.handler = handler
        self.aggr_num = aggr_num
        self.timeout = timeout
        self.concurrency = concurrency
        self.semaphore = asyncio.Semaphore(concurrency)
        self.quit = object()
        self.shutdown = False
        self.fut = None
        self.futures = set()
        self.start()

    def __repr__(self):
        return "<{}: qsize={}, concurrency={}, working={}>".format(
            self.__class__.__name__,
            self.queue.qsize(),
            self.concurrency,
            len(self.futures),
        )

    @property
    def qsize(self) -> int:
        return self.queue.qsize()

    async def put(self, obj):
        await self.queue.put(obj)

    def start(self):
        self.fut = asyncio.ensure_future(self.run())

    async def stop(self):
        logger.info("Stopping {0!r}".format(self))
        await self.queue.put(self.quit)
        if self.fut:
            await self.fut

    async def _get_obj_list(self) -> list:
        # collect up to aggr_num objects, waiting at most self.timeout
        # seconds in total; the quit sentinel ends the worker
        obj_list = []
        timeout = self.timeout
        while timeout > 0 and len(obj_list) < self.aggr_num:
            timestamp = time.time()
            try:
                async with async_timeout.timeout(timeout):
                    obj = await self.queue.get()
            except asyncio.TimeoutError:
                break
            if obj is self.quit:
                self.shutdown = True
                break
            obj_list.append(obj)
            timeout -= time.time() - timestamp
        return obj_list

    async def run(self):
        logger.info("Starting {0!r}".format(self))
        while not self.shutdown:
            obj_list = await self._get_obj_list()
            if not obj_list:
                continue
            await self.semaphore.acquire()
            fut = asyncio.ensure_future(self.handle(obj_list))
            self.futures.add(fut)
            fut.add_done_callback(self.futures.remove)
        if self.futures:
            await asyncio.wait(self.futures)

    async def handle(self, obj_list: list):
        try:
            try:
                result = self.handler(obj_list)
                if asyncio.iscoroutine(result):
                    result = await result
                if result:
                    # a handler may return a list of objects to retry;
                    # they go back on the queue
                    for obj in result:
                        await self.queue.put(obj)
            except Exception as e:
                logger.exception(e)
        finally:
            self.semaphore.release()
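
A self-contained BatchWorker sketch (not part of the package): the handler receives lists of at most aggr_num objects, and returning a non-empty list would requeue those objects for another attempt.

# usage sketch (not part of the package)
import asyncio
from cowfish.worker import BatchWorker

async def send(batch):
    print("flushing {} objects".format(len(batch)))
    # returning a non-empty list here would requeue those objects

async def main():
    # flush whenever 10 objects are buffered or 2 seconds have passed
    worker = BatchWorker(send, aggr_num=10, timeout=2)
    for i in range(25):
        await worker.put(i)
    await worker.stop()  # drains remaining objects before returning

asyncio.get_event_loop().run_until_complete(main())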
""" def __init__( self, handler, *, maxsize: int = 0, aggr_num: int = 10, timeout: int = 5, concurrency: int = 10 ): self.queue = asyncio.Queue(maxsize) self.handler = handler self.aggr_num = aggr_num self.timeout = timeout self.concurrency = concurrency self.semaphore = asyncio.Semaphore(concurrency) self.quit = object() self.shutdown = False self.fut = None self.futures = set() self.start() def __repr__(self): return "<{}: qsize={}, concurrency={}, working={}>".format( self.__class__.__name__, self.queue.qsize(), self.concurrency, len(self.futures), ) @property def qsize(self) -> int: return self.queue.qsize() async def put(self, obj): await self.queue.put(obj) def start(self): self.fut = asyncio.ensure_future(self.run()) async def stop(self): logger.info("Stopping {0!r}".format(self)) await self.queue.put(self.quit) if self.fut: await self.fut async def _get_obj_list(self) -> list: obj_list = [] timeout = self.timeout while timeout > 0 and len(obj_list) < self.aggr_num: timestamp = time.time() try: async with async_timeout.timeout(timeout): obj = await self.queue.get() except asyncio.TimeoutError: break if obj is self.quit: self.shutdown = True break obj_list.append(obj) timeout -= time.time() - timestamp return obj_list async def run(self): logger.info("Starting {0!r}".format(self)) while not self.shutdown: obj_list = await self._get_obj_list() if not obj_list: continue await self.semaphore.acquire() fut = asyncio.ensure_future(self.handle(obj_list)) self.futures.add(fut) fut.add_done_callback(self.futures.remove) if self.futures: await asyncio.wait(self.futures) async def handle(self, obj_list: list): try: try: result = self.handler(obj_list) if asyncio.iscoroutine(result): result = await result if result: for obj in result: await self.queue.put(obj) except Exception as e: logger.exception(e) finally: self.semaphore.release() PK !HfR^2 : ( cowfish-0.2.7.dist-info/entry_points.txtN+I/N.,()*.,.(ON-.N-M/O,CM PK oKW5) ) cowfish-0.2.7.dist-info/LICENSEMIT License Copyright (c) 2017 guyingbo Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. PK !HN O cowfish-0.2.7.dist-info/WHEELHM K-*ϳR03rOK-J,/RH,zd&Y)r$[)T&Ur PK !HBhw cowfish-0.2.7.dist-info/METADATAUO0&>S`)R:[Є&&"dh;T6w{*aoHч@l|4a0eZe0ȁm XPXj FEEDb=/6.