pandaspipe/mapper.py

# -*- coding:utf-8 -*-
import inspect
import logging

import pandas as pd

from base import PipelineEntity

_log = logging.getLogger(__name__)


def mapping(func=None, result=None, required_columns=(), external_pipe=()):
    def process(dec_func):
        dec_func.is_mapping = True
        dec_func.type = 'reduce'
        if not getattr(dec_func, 'mapping_info', False):
            dec_func.mapping_info = []
        dec_func.mapping_info.append([
            result if result else dec_func.__name__,
            required_columns if len(required_columns) != 0 else dec_func.func_code.co_varnames[1:]
        ])
        dec_func.external_pipe = external_pipe
        return dec_func

    if func:
        return process(func)
    return process


def mixin(func=None, result=None, external_pipe=()):
    def process(dec_func):
        dec_func.is_mapping = True
        dec_func.type = 'mixin'
        dec_func.result = result if result else dec_func.__name__
        dec_func.external_pipe = external_pipe
        return dec_func

    if func:
        return process(func)
    return process


class Mapper(PipelineEntity):

    def __init__(self):
        super(Mapper, self).__init__()
        self.priority = 20
        self.reduce_mapping = []
        self.mixin_mapping = []
        self.flat_mapping = []

    def register(self, pipeline):
        _log.info('Register Mapper %s in %s pipeline' % (self.__class__.__name__, pipeline.name))
        map_functions = inspect.getmembers(self, predicate=lambda func: isinstance(func, inspect.types.MethodType)
                                           and getattr(func, 'is_mapping', False))
        for name, map_function in map_functions:
            self.external_dependencies.extend(map_function.external_pipe)
            if map_function.type == 'reduce':
                _log.debug('Add function %s to mapper %s' % (name, self.__class__.__name__))
                for mapping_info in map_function.mapping_info:
                    self.reduce_mapping.append((map_function, mapping_info[0], mapping_info[1]))
            elif map_function.type == 'mixin':
                _log.debug('Add function %s to mapper %s' % (name, self.__class__.__name__))
                self.mixin_mapping.append((map_function, map_function.result))
            else:
                _log.info('Ignore mapping function %s in class %s with unknown type %s' % (
                    name, self.__class__.__name__, map_function.type))

    def __call__(self, df):
        """(Mapper, DataFrame) -> DataFrame"""
        new_df = pd.DataFrame()
        if len(df) != 0:
            for map_function, result, map_columns in self.reduce_mapping:
                args = [df[column] for column in map_columns]
                new_df[result] = map(map_function, *args)
            for map_function, result in self.mixin_mapping:
                mixin_value = map_function()
                new_df[result] = mixin_value
        return new_df


pandaspipe/__init__.py

# -*- coding:utf-8 -*-
from pandaspipe.mapper import Mapper, mapping, mixin
from pandaspipe.filter import Filter, NumberFilter, column_condition, columns_condition, type_condition
from pandaspipe.converter import Converter, converter
from pandaspipe.data import CSVDataSource, DataSource, ConstantDataSource, DataOutlet, importing, exporting
from pandaspipe.merger import Merger, SimpleMerger, merger, simple_merger
from pandaspipe.base import PipelineEntity, PipelineUnsupportedOperation
from pandaspipe.pipeline import Pipeline
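The following standalone sketch (not part of the package) shows how a Mapper subclass is meant to be used: @mapping derives a new column row by row from the columns named by the method's arguments, and @mixin appends a constant column. The class name, columns and toy dataframe are invented; register() only reads the pipeline's name, so a throwaway Pipeline('demo') is enough here.

import pandas as pd

from pandaspipe import Mapper, Pipeline, mapping, mixin


class PriceMapper(Mapper):

    @mapping                       # result column 'total'; inputs taken from the argument names
    def total(self, price, quantity):
        return price * quantity

    @mixin(result='currency')      # constant column appended to every row
    def currency(self):
        return 'USD'


mapper = PriceMapper()
mapper.register(Pipeline('demo'))  # collects the decorated methods
df = pd.DataFrame({'price': [1.5, 2.0], 'quantity': [4, 3]})
print(mapper(df))                  # columns: total, currency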
pandaspipe/pipeline.py

# -*- coding:utf-8 -*-
import abc
import inspect
import itertools
import logging
import sys
import types

import networkx as nx

from _util import patch_list, isSubset
from base import PipelineEntity

_log = logging.getLogger(__name__)
_log.addHandler(logging.StreamHandler(stream=sys.stdout))


class Pipeline:

    def __init__(self, name='Undefined Pipeline', env=None):
        """(Pipeline, str, dict) -> NoneType

        Create an empty pipeline with the given name and shared environment.
        """
        if env is None:
            env = {}
        self._entities = []
        self.name = name
        self.env = env
        self.graph = None

    def process(self, channels=('root',), ignore_outlet_node=False, output_channels=()):
        """(Pipeline, tuple, bool, tuple) -> DataFrame or dict

        Walk the process graph from the source nodes of the given channels,
        calling each entity once all of its predecessors have run. Returns
        the single surviving dataframe, or a dict of them keyed by channel.

        :param channels: channels whose source nodes start the run
        :param ignore_outlet_node: currently unused
        :param output_channels: channels that must survive until the end
        """
        start_nodes = [self._get_start_node(channel) for channel in channels]
        active_dfs = {}
        active_nodes = []
        incomplete_nodes = self.graph.nodes()
        complete_nodes = []
        active_nodes.extend(start_nodes)
        while len(active_nodes) > 0:
            next_nodes = []
            processed = False
            # Iterate over a snapshot: the body removes nodes from active_nodes
            for active_node in list(active_nodes):
                pred_nodes = self.graph.pred.get(active_node).keys()
                dependencies = active_node.external_dependencies
                if (len(pred_nodes) == 0 or isSubset(complete_nodes, pred_nodes)) \
                        and isSubset(active_dfs.keys(), dependencies):
                    _log.info('Call entity %s' % active_node)
                    processed = True
                    # Process
                    parameters = [active_dfs[channel] for channel in active_node.input_channels]
                    if active_node.type in ('node', 'bignode'):
                        external_dependencies = {}
                        if active_node.external_dependencies:
                            for external_dependency in active_node.external_dependencies:
                                external_dependencies[external_dependency] = active_dfs[external_dependency]
                        self.env['ext_dep'] = external_dependencies
                    result = active_node(*parameters)
                    active_nodes.remove(active_node)
                    complete_nodes.append(active_node)
                    incomplete_nodes.remove(active_node)
                    # Update active dataframes
                    if len(active_node.output_channels) == 1:
                        active_dfs[active_node.output_channels[0]] = result
                    elif len(active_node.output_channels) > 1:
                        active_dfs.update(result)
                    # Add next nodes
                    for node in self.graph.succ.get(active_node).keys():
                        if node not in active_nodes and node not in next_nodes:
                            next_nodes.append(node)
            if not processed:
                _log.error('Infinite cycle detected!')
                return None
            active_nodes.extend(next_nodes)
            # Drop dataframes that no remaining node or output channel needs
            for channel in active_dfs.keys():
                if channel not in output_channels and len(
                        [active_node for active_node in active_nodes
                         if channel in active_node.input_channels]) == 0:
                    # Keep the channel while an unfinished node still depends on it
                    required = any(channel in node.external_dependencies for node in incomplete_nodes)
                    if not required:
                        active_dfs.pop(channel)
        if len(active_dfs.keys()) == 1:
            return active_dfs.values()[0]
        return active_dfs

    def append(self, cls, channel=None, output_channel=None, construct_arguments=()):
        """(Pipeline, classobj, str, str, tuple) -> NoneType

        Register an entity class; equivalent to using the pipeline instance
        as a decorator.
        """
        self(channel, output_channel, construct_arguments=construct_arguments)(cls)

    def build_process_graph(self):
        builder = GraphBuilder(self._entities)
        return builder.build()

    def _check_graph(self):
        if self.graph is None:
            self.graph = self.build_process_graph()

    def _get_start_node(self, channel):
        self._check_graph()
        nodes = filter(lambda x: channel in x.output_channels and x.type == 'source', self.graph.nodes())
        if len(nodes) > 0:
            return nodes[0]
        raise Exception('You can\'t use a channel without a source node')

    def _process_entity(self, cls, channel, outchannel, construct_arguments, priority):
        """Instantiate cls, register it and assign its input/output channels."""
        obj = cls(*construct_arguments)
        obj.env = self.env
        if priority:
            obj.priority = priority
        obj.register(self)
        self._entities.append(obj)
        if channel is None and len(obj.input_channels) == 0 and len(obj.output_channels) == 0:
            channel = 'root'
        if channel:
            if outchannel is None:
                outchannel = channel
            if obj.type == 'node':
                obj.input_channels = channel[:1] if isinstance(channel, list) else [channel]
                obj.output_channels = outchannel[:1] if isinstance(outchannel, list) else [outchannel]
            elif obj.type == 'bignode':
                patch_list(obj.input_channels, channel)
                patch_list(obj.output_channels, outchannel)
            elif obj.type == 'source':
                obj.input_channels = []
                patch_list(obj.output_channels, outchannel)
            elif obj.type == 'outlet':
                patch_list(obj.input_channels, channel)
                obj.output_channels = []
            else:
                raise Exception('Unknown entity type: %s' % obj.type)
        return cls

    def __call__(self, channel=None, outchannel=None, construct_arguments=(), priority=None):
        """(Pipeline, str, str, tuple, int) -> function

        Decorator form: register the decorated PipelineEntity subclass.
        """
        def process_function(cls):
            cls_mro = inspect.getmro(cls)
            if PipelineEntity in cls_mro:
                self._process_entity(cls, channel, outchannel, construct_arguments, priority)
            return cls

        # Support bare usage: @pipeline without arguments
        if isinstance(channel, types.ClassType) or isinstance(channel, abc.ABCMeta):
            cls = channel
            channel = None
            return process_function(cls)
        return process_function


class GraphBuilder:

    def __init__(self, entities):
        self.entities = entities
        self.channel_io_nodes = {}
        self.graph = nx.DiGraph()

    def build(self):
        self.graph.add_nodes_from(self.entities)
        self._build_inchannel_connections()
        self._build_multichannel_connections()
        self._validate_external_dependencies()
        return self.graph

    def _build_inchannel_connections(self):
        all_channels = set(itertools.chain(
            *map(lambda x: set(itertools.chain(x.input_channels, x.output_channels)), self.entities)))
        for channel in all_channels:
            # Process simple nodes
            channel_nodes = filter(
                lambda x: x.type == 'node' and channel in x.input_channels and channel in x.output_channels,
                self.entities)
            channel_nodes.sort(key=lambda x: (x.priority, x.__class__.__name__))
            self.channel_io_nodes[channel] = {}
            if len(channel_nodes) > 0:
                self.channel_io_nodes[channel]['input'] = channel_nodes[0]
                self.channel_io_nodes[channel]['output'] = channel_nodes[-1]
                # noinspection PyCompatibility
                for i in xrange(0, len(channel_nodes) - 1):
                    self.graph.add_edge(channel_nodes[i], channel_nodes[i + 1])
            # Process outlet and source nodes
            input_nodes = filter(lambda x: x.type == 'source' and channel in x.output_channels, self.entities)
            assert len(input_nodes) in (0, 1), 'You can\'t use many input nodes for one channel'
            if len(input_nodes) > 0:
                if len(channel_nodes) > 0:
                    self.graph.add_edge(input_nodes[0], self.channel_io_nodes[channel]['input'])
                else:
                    self.graph.add_node(input_nodes[0])
                    self.channel_io_nodes[channel]['output'] = input_nodes[0]
            output_nodes = filter(lambda x: x.type == 'outlet' and channel in x.input_channels, self.entities)
            self.graph.add_nodes_from(output_nodes)
            if len(output_nodes) > 0:
                self.channel_io_nodes[channel]['outlets'] = output_nodes
                if len(channel_nodes) > 0:
                    for output_node in output_nodes:
                        self.graph.add_edge(self.channel_io_nodes[channel]['output'], output_node)

    def _build_multichannel_connections(self):
        for node in filter(lambda x: x.type in ('bignode', 'node') and x.input_channels != x.output_channels,
                           self.entities):
            for input_channel in node.input_channels:
                self.graph.add_edge(self.channel_io_nodes[input_channel]['output'], node)
            for output_channel in node.output_channels:
                channel_info = self.channel_io_nodes[output_channel]
                if not channel_info.get('input') and not channel_info.get('outlets'):
                    raise Exception('Channel %s has no consumer in the graph' % output_channel)
                if channel_info.get('input'):
                    self.graph.add_edge(node, channel_info['input'])
                if channel_info.get('outlets'):
                    for outlet in channel_info.get('outlets'):
                        self.graph.add_edge(node, outlet)

    def _validate_external_dependencies(self):
        pass


pandaspipe/data.py

# -*- coding:utf-8 -*-
import inspect
import logging
import sys

import pandas as pd

from base import PipelineEntity

_log = logging.getLogger(__name__)


def importing(func):
    func.is_importing = True
    return func


def exporting(func):
    func.is_exporting = True
    return func


class DataSource(PipelineEntity):

    def __init__(self, name='NoName DataSource', **kwargs):
        PipelineEntity.__init__(self)
        self.priority = 0
        self.type = 'source'
        self.name = name
        self._config = kwargs
        self._import_functions = []

    def __call__(self):
        dfs = [import_function() for import_function in self._import_functions]
        if len(dfs) > 1:
            result = pd.concat(dfs, axis=1)
        else:
            result = dfs[0]
        return result

    def register(self, pipeline):
        _log.info('Register DataSource %s in %s pipeline' % (self.__class__.__name__, pipeline.name))
        import_functions = inspect.getmembers(self, predicate=lambda func: isinstance(func, inspect.types.MethodType)
                                              and getattr(func, 'is_importing', False))
        for name, import_function in import_functions:
            self._import_functions.append(import_function)


class DataOutlet(PipelineEntity):

    def __init__(self, name='NoName DataOutlet', **kwargs):
        PipelineEntity.__init__(self)
        self.priority = sys.maxint
        self.type = 'outlet'
        self.name = name
        self._config = kwargs
        self._export_functions = []

    def __call__(self, df):
        for export_function in self._export_functions:
            export_function(df)
        return df

    def register(self, pipeline):
        _log.info('Register DataOutlet %s in %s pipeline' % (self.__class__.__name__, pipeline.name))
        export_functions = inspect.getmembers(self, predicate=lambda func: isinstance(func, inspect.types.MethodType)
                                              and getattr(func, 'is_exporting', False))
        for name, export_function in export_functions:
            self._export_functions.append(export_function)


class CSVDataSource(DataSource):

    def __init__(self, source, **kwargs):
        DataSource.__init__(self, 'CSVDataSource', **kwargs)
        self._source = source

    @importing
    def load(self):
        return pd.read_csv(self._source, **self._config)


class ConstantDataSource(DataSource):

    def __init__(self, df, **kwargs):
        DataSource.__init__(self, 'ConstantDataSource', **kwargs)
        self.df = df

    @importing
    def const(self):
        return self.df
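A usage sketch for the source and outlet entities above, with invented names and data: each @importing method contributes a dataframe and DataSource.__call__ concatenates them along axis=1; each @exporting method receives the final dataframe. Registration is done directly, with a throwaway Pipeline standing in.

import pandas as pd

from pandaspipe import DataOutlet, DataSource, Pipeline, exporting, importing


class TwoFrameSource(DataSource):

    @importing
    def left(self):
        return pd.DataFrame({'a': [1, 2]})

    @importing
    def right(self):
        return pd.DataFrame({'b': [3, 4]})


class PrintOutlet(DataOutlet):

    @exporting
    def dump(self, df):
        print(df)


source = TwoFrameSource()
source.register(Pipeline('demo'))
df = source()                      # columns a and b, concatenated along axis=1
outlet = PrintOutlet()
outlet.register(Pipeline('demo'))
outlet(df)                         # prints df and returns it unchanged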
pandaspipe/merger.py

# -*- coding:utf-8 -*-
import inspect
import logging

import pandas as pd

from base import PipelineEntity

_log = logging.getLogger(__name__)


def simple_merger(func):
    func.is_merger = True
    func.type = 'simple'
    return func


def merger(func=None, required_channels=None, output_channels=None):
    def process(dec_func):
        dec_func.is_merger = True
        dec_func.type = 'normal'
        dec_func.required_channels = required_channels if required_channels else dec_func.func_code.co_varnames[1:]
        dec_func.output_channels = output_channels if output_channels else [dec_func.__name__]
        return dec_func

    if func:
        return process(func)
    return process


class Merger(PipelineEntity):

    def __init__(self):
        super(Merger, self).__init__()
        self.type = 'bignode'
        self.priority = 10
        self._mergers = []
        self._simple_merger = None

    def __call__(self, *dfs):
        df_map = {}
        new_df_map = {}
        for i in xrange(0, len(self.input_channels)):
            df_map[self.input_channels[i]] = dfs[i]
        for merger_function, required_channels, output_channels in self._mergers:
            result = merger_function(*[df_map.pop(required_channel) for required_channel in required_channels])
            if not isinstance(result, list):
                result = [result]
            for i in xrange(0, len(result)):
                new_df_map[output_channels[i]] = result[i]
        rest_channels = df_map.keys()
        if len(rest_channels) != 0 and self._simple_merger:
            # The simple merger feeds the one output channel no normal merger produces
            output_simple_channel = filter(
                lambda output_channel: len(filter(lambda merger_tuple: output_channel in merger_tuple[2],
                                                  self._mergers)) == 0,
                self.output_channels)[0]
            new_df_map[output_simple_channel] = self._simple_merger(*df_map.values())
        return new_df_map

    def register(self, pipeline):
        _log.info('Register Merger %s in %s pipeline' % (self.__class__.__name__, pipeline.name))
        merge_functions = inspect.getmembers(self, predicate=lambda func: isinstance(func, inspect.types.MethodType)
                                             and getattr(func, 'is_merger', False))
        for name, merge_function in merge_functions:
            if merge_function.type == 'simple':
                if self._simple_merger:
                    raise Exception('Merger %s has too many simple mergers' % self.__class__.__name__)
                _log.debug('Add %s simple merger to Merger %s' % (merge_function.__name__, self.__class__.__name__))
                self._simple_merger = merge_function
            elif merge_function.type == 'normal':
                _log.debug('Add %s merger to Merger %s' % (merge_function.__name__, self.__class__.__name__))
                self._mergers.append((merge_function, merge_function.required_channels,
                                      merge_function.output_channels))
                self.input_channels.extend(merge_function.required_channels)
                self.output_channels.extend(merge_function.output_channels)


class SimpleMerger(Merger):

    @simple_merger
    def merge(self, *dfs):
        return pd.concat(dfs, axis=1)


pandaspipe/base.py

# -*- coding:utf-8 -*-
import abc
import logging

_log = logging.getLogger(__name__)

__all__ = ['PipelineEntity', 'PipelineUnsupportedOperation']


class PipelineEntity:
    """Base class for everything that can be registered in a Pipeline."""

    __metaclass__ = abc.ABCMeta

    def __init__(self):
        self.env = {}
        self.priority = None
        self.type = 'node'
        self.input_channels = []
        self.output_channels = []
        self.external_dependencies = []

    @abc.abstractmethod
    def register(self, pipeline):
        """Hook called when the entity is added to a pipeline.

        :param pipeline: the Pipeline this entity is registered in
        """
        pass


class PipelineUnsupportedOperation(Exception):
    """Raised when an entity cannot perform the requested operation."""
    pass


pandaspipe/_util.py

# -*- coding:utf-8 -*-


def match_in_dict(keys, values):
    """(list, list) -> dict

    :param keys: list of keys
    :param values: list of values
    :return: dict pairing each key with the value at the same position
    """
    assert len(keys) == len(values)
    return {keys[i]: values[i] for i in range(0, len(keys))}


def equals_for_dict(d1, d2):
    """(dict(str, DataFrame), dict(str, DataFrame)) -> bool

    Deep comparison of two specific dicts.

    :param d1: dict with string keys and dataframe values
    :param d2: dict with string keys and dataframe values
    :return: True if both dicts hold equal dataframes under the same keys
    """
    if len(d1.keys()) != len(d2.keys()):
        return False
    for key in d1.keys():
        obj1 = d1.get(key)
        obj2 = d2.get(key)
        if obj2 is None:
            return False
        # Compare every entry, not only the first one
        if obj1.columns.tolist() != obj2.columns.tolist() or not obj1.equals(obj2):
            return False
    return True


def patch_list(lst, obj):
    """Extend lst with obj if obj is a list, otherwise append it."""
    if isinstance(obj, list):
        lst.extend(obj)
    else:
        lst.append(obj)
    return lst


def isSubset(list_, sub_list):
    """Return True if every element of sub_list occurs in list_."""
    return set(sub_list) <= set(list_)
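A sketch of a custom Merger, again with invented channel names and data: required_channels name the inputs a @merger method consumes, output_channels the results it produces; any leftover input channels would fall through to a @simple_merger, if one were declared.

import pandas as pd

from pandaspipe import Merger, Pipeline, merger


class JoinMerger(Merger):

    @merger(required_channels=['orders', 'customers'], output_channels=['joined'])
    def join(self, orders, customers):
        return orders.merge(customers, on='customer_id')


m = JoinMerger()
m.register(Pipeline('demo'))       # fills input_channels and output_channels
orders = pd.DataFrame({'customer_id': [1, 2], 'amount': [10, 20]})
customers = pd.DataFrame({'customer_id': [1, 2], 'name': ['Ann', 'Bob']})
print(m(orders, customers))        # {'joined': <orders joined with customers>}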
pandaspipe/converter.py

# -*- coding:utf-8 -*-
import inspect
import logging

from base import PipelineEntity

_log = logging.getLogger(__name__)


def converter(func=None, target=None):
    def process(dec_func):
        dec_func.is_converter = True
        dec_func.type = 'column'
        if not getattr(dec_func, 'column_converter_info', False):
            dec_func.column_converter_info = []
        dec_func.column_converter_info.append(target if target is not None else dec_func.__name__)
        return dec_func

    if func:
        return process(func)
    return process


class Converter(PipelineEntity):
    """Pipeline entity used to convert data in a dataframe.

    Each decorated method converts exactly one series.
    """

    def __init__(self):
        super(Converter, self).__init__()
        self.priority = 6
        self.column_converters = []

    def register(self, pipeline):
        _log.info('Register Converter %s in %s pipeline' % (self.__class__.__name__, pipeline.name))
        map_functions = inspect.getmembers(self, predicate=lambda func: isinstance(func, inspect.types.MethodType)
                                           and getattr(func, 'is_converter', False))
        for name, condition_function in map_functions:
            if condition_function.type == 'column':
                _log.debug('Add function %s to Converter %s' % (name, self.__class__.__name__))
                for target in condition_function.column_converter_info:
                    self.column_converters.append((condition_function, target))
            else:
                _log.info('Ignore converter function %s in class %s with unknown type %s' % (
                    name, self.__class__.__name__, condition_function.type))

    def __call__(self, df):
        """(Converter, DataFrame) -> DataFrame"""
        if len(df) == 0:
            return df
        for convert_function, target in self.column_converters:
            if target in df.columns:
                df[target] = df[target].apply(convert_function)
        return df
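A Converter usage sketch with invented column names: each @converter method is applied element-wise to its target column through Series.apply, and the target defaults to the method name.

import pandas as pd

from pandaspipe import Converter, Pipeline, converter


class CleanupConverter(Converter):

    @converter(target='price')
    def to_float(self, value):
        return float(value)

    @converter                     # target defaults to the method name: 'name'
    def name(self, value):
        return value.strip().lower()


conv = CleanupConverter()
conv.register(Pipeline('demo'))
df = pd.DataFrame({'price': ['1.5', '2'], 'name': [' Ann ', 'BOB']})
print(conv(df))                    # price as floats, name stripped and lowercased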
pandaspipe/filter.py

# -*- coding:utf-8 -*-
import inspect
import logging

from base import PipelineEntity

_log = logging.getLogger(__name__)


def columns_condition(func=None, required_columns=()):
    def process(dec_func):
        dec_func.is_condition = True
        dec_func.type = 'columns'
        if not getattr(dec_func, 'columns_condition_info', False):
            dec_func.columns_condition_info = []
        dec_func.columns_condition_info.append(
            required_columns if len(required_columns) != 0 else dec_func.func_code.co_varnames[1:])
        return dec_func

    if func:
        return process(func)
    return process


def column_condition(func=None, target=None, direct_use=True):
    def process(dec_func):
        dec_func.is_condition = True
        dec_func.type = 'column'
        if not getattr(dec_func, 'column_condition_info', False):
            dec_func.column_condition_info = []
        dec_func.column_condition_info.append([
            target if target is not None else dec_func.func_code.co_varnames[1],
            direct_use
        ])
        return dec_func

    if func:
        return process(func)
    return process


def type_condition(func=None, target_columns=()):
    def process(dec_func):
        dec_func.is_condition = True
        dec_func.type = 'type'
        dec_func.target_columns = target_columns
        return dec_func

    if func:
        return process(func)
    return process


class Filter(PipelineEntity):

    def __init__(self):
        super(Filter, self).__init__()
        self.priority = 10
        self.columns_conditions = []
        self.column_conditions = []
        self.type_conditions = []

    def register(self, pipeline):
        _log.info('Register Filter %s in %s pipeline' % (self.__class__.__name__, pipeline.name))
        map_functions = inspect.getmembers(self, predicate=lambda func: isinstance(func, inspect.types.MethodType)
                                           and getattr(func, 'is_condition', False))
        for name, condition_function in map_functions:
            if condition_function.type == 'columns':
                _log.debug('Add function %s to Filter %s' % (name, self.__class__.__name__))
                for required_columns in condition_function.columns_condition_info:
                    self.columns_conditions.append((condition_function, required_columns))
            elif condition_function.type == 'column':
                _log.debug('Add function %s to Filter %s' % (name, self.__class__.__name__))
                for target, direct_use in condition_function.column_condition_info:
                    self.column_conditions.append((condition_function, target, direct_use))
            elif condition_function.type == 'type':
                self.type_conditions.append(condition_function)
                _log.debug('Add function %s to Filter %s' % (name, self.__class__.__name__))
            else:
                _log.info('Ignore condition function %s in class %s with unknown type %s' % (
                    name, self.__class__.__name__, condition_function.type))

    def __call__(self, df):
        columns = df.columns.tolist()
        if len(df) == 0:
            return df
        for condition_function in self.type_conditions:
            target_columns = condition_function.target_columns
            if len(target_columns) == 0:
                target_columns = columns
            for target_column in target_columns:
                if not condition_function(df.dtypes[target_column]):
                    df = df.drop(target_column, axis=1)
        for condition_function, target, direct_use in self.column_conditions:
            if direct_use:
                df = df[condition_function(df[target])]
            else:
                new_index = filter(lambda i: i is not None,
                                   map(lambda i, *args: i if condition_function(*args) else None,
                                       df.index, df[target]))
                df = df.loc[new_index]
        for condition_function, condition_columns in self.columns_conditions:
            new_index = filter(lambda i: i is not None,
                               map(lambda i, *args: i if condition_function(*args) else None,
                                   df.index, *[df[column] for column in columns if column in condition_columns]))
            # .loc, not .iloc: new_index holds index labels, not positions
            df = df.loc[new_index]
        return df


class NumberFilter(Filter):

    @type_condition
    def number_condition(self, dtype):
        return dtype in ['float', 'float64', 'int', 'int64']
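A Filter usage sketch with invented names and data, showing the two row-level condition kinds: a bare @column_condition keeps direct_use=True and receives the whole column as a Series, while @columns_condition is called row by row with scalars from the columns named by its arguments. NumberFilter above shows the third kind, a @type_condition that drops non-numeric columns.

import pandas as pd

from pandaspipe import Filter, Pipeline, column_condition, columns_condition


class SmallPositiveFilter(Filter):

    @column_condition              # target defaults to the argument name: 'amount'
    def amount(self, amount):
        return amount > 0          # called with the Series, returns a boolean mask

    @columns_condition             # required columns taken from the argument names
    def small_total(self, amount, quantity):
        return amount * quantity < 100


f = SmallPositiveFilter()
f.register(Pipeline('demo'))
df = pd.DataFrame({'amount': [5, -1, 30], 'quantity': [2, 10, 5]})
print(f(df))                       # keeps only the first row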
pandaspipe-0.2.1.dist-info/METADATA

Metadata-Version: 2.0
Name: pandaspipe
Version: 0.2.1
Summary: Tool to process pandas dataframes with pipe
Home-page: http://siredvin.github.io/
Author: siredvin
Author-email: siredvin.dark@gmail.com
License: Apache 2.0
Platform: UNKNOWN
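Putting the pieces together, a minimal end-to-end sketch on the default 'root' channel, with invented entity names and toy data. Note that process() drops dataframes no remaining node consumes, so the channel you want returned must be listed in output_channels.

import pandas as pd

from pandaspipe import (ConstantDataSource, Converter, Filter, Mapper,
                        Pipeline, column_condition, converter, mapping)

pipe = Pipeline('demo')

# Source feeding the default 'root' channel
pipe.append(ConstantDataSource, 'root',
            construct_arguments=(pd.DataFrame({'price': ['1.5', '-2.0', '3.0']}),))


@pipe('root')
class PriceConverter(Converter):   # priority 6: runs first

    @converter(target='price')
    def to_float(self, value):
        return float(value)


@pipe('root')
class PositiveFilter(Filter):      # priority 10: runs second

    @column_condition
    def price(self, price):
        return price > 0


@pipe('root')
class DoubleMapper(Mapper):        # priority 20: runs last

    @mapping(result='double_price', required_columns=('price',))
    def double_price(self, price):
        return price * 2


result = pipe.process(output_channels=('root',))
print(result)                      # one column, double_price: 3.0 and 6.0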