# --- module (file name truncated in the archive dump); the enclosing class definition and the
#     signature of its fit method did not survive, only the method bodies below ---

        def model_fitter(**kwargs) -> pandas_ml_utils.model.fit.Fit:
            fit = self.data_engineer(self.data, **kwargs) \
                .fit_classifier(self.model_provider,
                                test_size=test_size,
                                test_validate_split_seed=test_validate_split_seed)

            log.info(f'fit for { {**kwargs}}\n'
                     f'{fit.training_classification.confusion_count()}\n'
                     f'{fit.test_classification.confusion_count()}')

            return fit

        # TODO there should be a way to generate one ClassificationSummary out of several
        #      by summing or averaging
        self.fits = [model_fitter(**kwargs) for kwargs in self.parameter_space]
        self.min_needed_data = max([fit.model.min_required_data for fit in self.fits])

    def predict(self) -> pd.DataFrame:
        df = self.data[-self.min_needed_data:] if self.min_needed_data is not None else self.data

        def model_predictor(model, **kwargs) -> pd.DataFrame:
            prediction = self.data_engineer(df, **kwargs) \
                .classify(model)

            return prediction[-1:]

        predictions = [model_predictor(self.fits[i].model, **kwargs)
                       for i, kwargs in enumerate(self.parameter_space)]

        return predictions

    def plot_heatmap(self, parameter_as_column: str):
        import seaborn as sns
        sns.heatmap(self.compute_heatmap(parameter_as_column))

    def compute_heatmap(self, parameter_as_column: str):
        predictions = self.predict()

        # to group all row indices per column index we first need to sort accordingly
        sorted_parameter_space = sorted(enumerate(self.parameter_space),
                                        key=lambda x: x[1][parameter_as_column])
        columns = {col: [value[0] for value in parameter]
                   for col, parameter in groupby(sorted_parameter_space,
                                                 lambda x: x[1][parameter_as_column])}

        # assemble a data frame for each column
        predictions = [pd.concat([predictions[row][["target", "prediction_proba"]] for row in rows],
                                 axis=0, sort=True)
                           .set_index("target")
                           .groupby(level=0).max()
                           .rename(columns={"prediction_proba": column})
                       for column, rows in columns.items()]

        predictions = pd.concat(predictions, axis=1, sort=True).sort_index(ascending=False)
        return predictions
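# --- usage sketch (illustrative, not from the package): the grouping idea behind
#     compute_heatmap above, reduced to plain pandas. The prediction frames and the
#     "lag" parameter are invented for the example. ---
from itertools import groupby

import pandas as pd

parameter_space = [{"lag": 1}, {"lag": 1}, {"lag": 2}]            # hypothetical parameter combinations
predictions = [                                                   # one prediction frame per combination
    pd.DataFrame({"target": [100, 110], "prediction_proba": [0.4, 0.6]}),
    pd.DataFrame({"target": [100, 110], "prediction_proba": [0.7, 0.2]}),
    pd.DataFrame({"target": [100, 110], "prediction_proba": [0.5, 0.9]}),
]

# group the row indices by the value of the chosen parameter (same sort + groupby as above)
sorted_space = sorted(enumerate(parameter_space), key=lambda x: x[1]["lag"])
columns = {col: [i for i, _ in rows]
           for col, rows in groupby(sorted_space, lambda x: x[1]["lag"])}

# per parameter value keep the maximum probability per target, then concatenate column wise
heatmap = pd.concat([pd.concat([predictions[i].set_index("target") for i in rows])
                         .groupby(level=0).max()
                         .rename(columns={"prediction_proba": col})
                     for col, rows in columns.items()],
                    axis=1).sort_index(ascending=False)

print(heatmap)  # rows: targets, columns: parameter values, cells: max predicted probability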
# --- feature engineering module (file name truncated in the archive dump); only the tail of
#     make_training_data survives, the split expression itself is cut off ---

    # (the split expression is truncated in the dump; its surviving tail reads)
    #     ... > 0 \
    #         else (x, None, y, None, df_new.index, None)

    log.info(f" splitting ... done in {pc() - start_split_pc: .2f} sec!")

    # ravel one dimensional labels
    if len(features_and_labels.labels) == 1:
        y_train = y_train.ravel().astype(label_type)
        y_test = y_test.ravel().astype(label_type) if y_test is not None else None

    log.info(f"make training / test data split ... done in {pc() - start_pc: .2f} sec!")

    # print some statistics if needed
    if summary_printer is not None:
        summary_printer(y, y_train, y_test)

    # return the split
    return x_train, x_test, y_train, y_test, index_train, index_test, min_required_data, \
           (names, features_and_labels.labels)


def make_forecast_data(df: pd.DataFrame, features_and_labels: 'FeaturesAndLabels'):
    return _make_features(df[features_and_labels.features], features_and_labels)


@lru_cache(maxsize=int(os.getenv('CACHE_FEATUES_AND_LABELS', '1')))
def _make_features_with_cache(df: HashableDataFrame, features_and_labels: 'FeaturesAndLabels'):
    log.info(f"no cache entry available for {hash(df), hash(features_and_labels)}")
    return _make_features(df, features_and_labels)


def _make_features(df: pd.DataFrame, features_and_labels: 'FeaturesAndLabels'):
    start_pc = log_with_time(lambda: log.debug(" make features ..."))

    feature_lags = features_and_labels.feature_lags
    features = features_and_labels.features
    lag_smoothing = features_and_labels.lag_smoothing

    # drop nan's and copy the frame
    df = df.dropna().copy()

    # generate the feature matrix
    if feature_lags is not None:
        # return RNN shaped 3D arrays
        for feature in features:
            feature_series = df[feature]
            smoothers = None

            # smooth out the feature if requested
            if lag_smoothing is not None:
                smoothers = SortedDict({lag: smoother(feature_series.to_frame())
                                        for lag, smoother in lag_smoothing.items()})

            for lag in feature_lags:
                # if smoothed values are applicable use the smoothed values
                if smoothers is not None and len(smoothers) > 0 and smoothers.peekitem(0)[0] <= lag:
                    feature_series = smoothers.popitem(0)[1]

                # assign the lagged (and possibly smoothed) feature to the features frame
                df[f'{feature}_{lag}'] = feature_series.shift(lag)

        # drop all rows which now contain nan
        df = df.dropna()

        # the RNN shape needs to be [row, time_step, feature]
        x = np.array([[[df.iloc[row][f'{feat}_{lag}'] for feat in features]
                       for lag in feature_lags]
                      for row in range(len(df))], ndmin=3)
        names = np.array([[f'{feat}_{lag}' for feat in features] for lag in feature_lags], ndmin=2)
    else:
        # return simple 2D arrays
        x = df[features].values
        names = features

    log.info(f" make features ... done in {pc() - start_pc: .2f} sec!")
    return df, x, names
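# --- usage sketch (illustrative, not from the package): the lagging branch of _make_features
#     above without smoothing; the column names and lags are invented for the example. ---
import numpy as np
import pandas as pd

df = pd.DataFrame({"open": np.arange(10.0), "close": np.arange(10.0) * 2})
features, feature_lags = ["open", "close"], [0, 1, 2]

# shift every feature by every lag, just like df[f'{feature}_{lag}'] = feature_series.shift(lag)
for feature in features:
    for lag in feature_lags:
        df[f"{feature}_{lag}"] = df[feature].shift(lag)

df = df.dropna()  # the first max(feature_lags) rows become nan and get dropped

# RNN shape is [row, time_step, feature]
x = np.array([[[df.iloc[row][f"{feat}_{lag}"] for feat in features]
               for lag in feature_lags]
              for row in range(len(df))], ndmin=3)

print(x.shape)  # (8, 3, 2): 8 remaining rows, 3 time steps (lags), 2 features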
def reshape_rnn_as_ar(arr3d):
    if len(arr3d.shape) < 3:
        print("Data was not in RNN shape")
        return arr3d
    else:
        return arr3d.reshape(arr3d.shape[0], arr3d.shape[1] * arr3d.shape[2])


# --- parameter space module (file name truncated in the archive dump); the function name and
#     its parameters are reconstructed from the recursive call, only "-> List[Dict]" survives ---

def unfold_parameter_space(parameter_space: Dict, parameters: Dict) -> List[Dict]:
    if len(parameter_space) > 0:
        # more parameters need to be unfolded
        parameter, space = parameter_space.popitem()
        return list(np.array([unfold_parameter_space(parameter_space.copy(),
                                                     {**parameters, parameter: argument})
                              for argument in space]).flat)
    else:
        return parameters
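# --- usage sketch (illustrative, not from the package): the cross product which
#     unfold_parameter_space above computes, expressed with itertools.product
#     (the element order may differ); the parameter names are invented for the example. ---
from itertools import product

parameter_space = {"lag": [1, 2], "stop_loss": [0.9, 0.8]}

unfolded = [dict(zip(parameter_space.keys(), combination))
            for combination in product(*parameter_space.values())]

print(unfolded)
# [{'lag': 1, 'stop_loss': 0.9}, {'lag': 1, 'stop_loss': 0.8},
#  {'lag': 2, 'stop_loss': 0.9}, {'lag': 2, 'stop_loss': 0.8}]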
# --- classification fitting module (file name truncated in the archive dump); the def line of
#     the first function is cut off, presumably fit_classifier judging by the call in
#     model_fitter above; its parameters are read off the body, only "-> Fit" survives ---

def fit_classifier(df, model_provider, test_size, number_of_cross_validation_splits,
                   cache_feature_matrix, test_validate_split_seed, summary_printer) -> Fit:
    model, train, test, index = _fit(df, model_provider,
                                     test_size=test_size,
                                     number_of_cross_validation_splits=number_of_cross_validation_splits,
                                     cache_feature_matrix=cache_feature_matrix,
                                     test_validate_split_seed=test_validate_split_seed,
                                     summary_printer=summary_printer)

    # assemble the result objects
    features_and_labels = model.features_and_labels
    cutoff = model[("probability_cutoff", 0.5)]
    loss = df[features_and_labels.loss_column] if features_and_labels.loss_column is not None else None

    training_classification = ClassificationSummary(train[1], model.predict(train[0]), index[0], loss, cutoff)
    test_classification = ClassificationSummary(test[1], model.predict(test[0]), index[1], loss, cutoff)
    return Fit(model, training_classification, test_classification)


def backtest_classifier(df: pd.DataFrame, model: Model) -> ClassificationSummary:
    x, y, y_hat, index = _backtest(df, model)

    features_and_labels = model.features_and_labels
    loss = df[features_and_labels.loss_column if features_and_labels.loss_column is not None else []]

    return ClassificationSummary(y, y_hat, index, loss, model[("probability_cutoff", 0.5)])


def classify(df: pd.DataFrame, model: Model, tail: int = None) -> pd.DataFrame:
    dff = _predict(df, model, tail)

    # return the result with the raw probability and the thresholded class
    dff["prediction_proba"] = dff["prediction"]
    dff["prediction"] = dff["prediction_proba"] > model[("probability_cutoff", 0.5)]

    return dff


# --- classification summary module (file name truncated in the archive dump); the class name
#     and the constructor arguments follow from the ClassificationSummary(...) calls above,
#     the first lines of the constructor are cut off ---

class ClassificationSummary(object):

    def __init__(self, y_true, y_prediction, index, loss, probability_cutoff: float = 0.5):
        self.y_true = y_true
        # (this assignment is truncated in the dump; it ends in "... 1 else y_prediction",
        #  i.e. a two column probability output is reduced to a single column)
        self.y_prediction = y_prediction
        self.index = index
        self.loss = loss
        self.probability_cutoff = probability_cutoff
        self.confusion_matrix = self._confusion_matrix_indices()

        # immediately log some fit quality measures
        ratios = self.get_ratios()
        log.info(f"FN Ratio = {ratios[0]}, FP Ratio = {ratios[1]}")

    def set_probability_cutoff(self, probability_cutoff: float = 0.5):
        self.probability_cutoff = probability_cutoff
        self.confusion_matrix = self._confusion_matrix_indices()

    def _confusion_matrix_indices(self):
        index = self.index
        truth = self.y_true
        pred = self.y_prediction
        co = self.probability_cutoff

        try:
            confusion = np.array([[index[(truth == True) & (pred > co)], index[(truth == False) & (pred > co)]],
                                  [index[(truth == True) & (pred <= co)], index[(truth == False) & (pred <= co)]]])

            if len(confusion[0, 0]) <= 0:
                log.warning("Very bad fit with 0 TP, which leads to problems in the plot")

            return confusion
        except Exception:
            print(f"shapes: y_true: {self.y_true.shape}, y_pred: {self.y_prediction.shape}, index: {self.index.shape}")
            print("Unexpected error:", sys.exc_info()[0])
            return None

    def get_ratios(self):
        cm = self.confusion_count()
        return cm[0, 0] / (cm[1, 0] + 1), cm[0, 0] / (cm[0, 1] + 1)

    def plot_backtest(self, y: pd.Series = None, size: Union[int, pd.Series] = None,
                      figsize: Tuple[int, int] = (16, 6)):
        # only import if required
        import seaborn as sns
        import matplotlib.pyplot as plt
        from pandas.plotting import register_matplotlib_converters

        # get rid of the deprecation warning
        register_matplotlib_converters()

        # check that we have a value for the back test
        if self.loss is None and y is None:
            raise ValueError("No loss column defined, neither in FeaturesAndLabels nor in plot_backtest")

        # scatter plot where the confusion squares provide the colors and the loss the size
        y = y if y is not None \
            else self.loss.loc[self.index] if isinstance(self.loss, pd.Series) \
            else self.loss[self.loss.columns[0]].loc[self.index]

        color = pd.Series(0, index=y.index)
        color.loc[self.confusion_matrix[0, 0]] = 1
        color.loc[self.confusion_matrix[1, 0]] = 2

        # get colors from: https://xkcd.com/color/rgb/
        fig, ax = plt.subplots(figsize=figsize)
        ax.set_ylim([y.min() * 1.1, 1])

        scatt = sns.scatterplot(x=y.index, y=y, ax=ax,
                                size=size if size is not None else y * -1,
                                hue=color,
                                palette=[sns.xkcd_rgb['white'], sns.xkcd_rgb['pale green'], sns.xkcd_rgb['cerise']])

        bar = sns.lineplot(x=y.index, y=self.y_prediction, ax=ax)
        plt.hlines(self.probability_cutoff, y.index.min(), y.index.max(), color=sns.xkcd_rgb['silver'])

        plt.close()
        return fig

    def confusion_loss(self):
        cm = self.confusion_matrix
        df = self.loss
        return np.array([[df.loc[cm[0, 0]].sum(), df.loc[cm[0, 1]].sum()],
                         [df.loc[cm[1, 0]].sum(), df.loc[cm[1, 1]].sum()]])

    def confusion_count(self):
        return np.array([
            [len(self.confusion_matrix[0, 0]), len(self.confusion_matrix[0, 1])],
            [len(self.confusion_matrix[1, 0]), len(self.confusion_matrix[1, 1])],
        ])

    def _repr_html_(self):
        return self._html_()._repr_html_()

    def _html_(self, width: str = '100%'):
        # only import if needed
        from vdom.helpers import div, p, img, table, tr, td, tbody, thead, th
        import matplotlib.pyplot as plt
        import base64
        import io

        if self.confusion_count()[0, 0] <= 0:
            return p('very bad fit with 0 TP!')

        image = None
        if self.loss is not None:
            with io.BytesIO() as f:
                fig = self.plot_backtest()
                fig.savefig(f, format="png", bbox_inches='tight')
                image = base64.encodebytes(f.getvalue()).decode("utf-8")
                plt.close(fig)

        cmc = self.confusion_count()
        cml = self.confusion_loss() if self.loss is not None else np.array([[0, 0], [0, 0]])

        return div(
            table(
                thead(
                    tr(
                        th("Classification Count", style={'text-align': 'left'}),
                        th("Classification Loss", style={'text-align': 'right'})
                    )
                ),
                tbody(
                    tr(
                        td(self._matrix_table(cmc)),
                        td(self._matrix_table(cml), style={'float': 'right'})
                    ),
                    tr(
                        td(
                            img(src=f'data:image/png;base64,{image}', style={'width': '100%'}) if image is not None else "",
                            colspan='2'
                        )
                    )
                ),
                style={'width': '100%'}
            ),
            style={'width': width}
        )

    def _matrix_table(self, mx: np.array):
        from vdom.helpers import table, tr, td, tbody, thead

        row_label = [[td("True")], [td("False")]]
        colors = [['green', 'orange'], ['red', 'grey']]

        return table(
            thead(
                tr(
                    td("Prediction / Truth"),
                    td("True"),
                    td("False")
                )
            ),
            tbody(
                [tr(row_label[row] + [td(f'{mx[row, col]: .2f}', style={'color': colors[row][col]})
                                      for col in range(mx.shape[1])])
                 for row in range(mx.shape[0])]
            )
        )

    def __len__(self):
        return len(self.y_true)

    def __str__(self) -> str:
        return f'\n{len(self.confusion_matrix[0, 0])}\t{len(self.confusion_matrix[0, 1])}' \
               f'\n{len(self.confusion_matrix[1, 0])}\t{len(self.confusion_matrix[1, 1])}'
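# --- usage sketch (illustrative, not from the package): how _confusion_matrix_indices above
#     partitions an index into TP / FP / FN / TN buckets via the probability cutoff;
#     the data is invented for the example. ---
import numpy as np
import pandas as pd

index = pd.Index(pd.date_range("2019-01-01", periods=6))
truth = np.array([True, False, True, True, False, False])
proba = np.array([0.9, 0.8, 0.3, 0.7, 0.1, 0.6])
cutoff = 0.5

confusion = [
    [index[(truth == True) & (proba > cutoff)],  index[(truth == False) & (proba > cutoff)]],   # TP, FP
    [index[(truth == True) & (proba <= cutoff)], index[(truth == False) & (proba <= cutoff)]],  # FN, TN
]

counts = np.array([[len(cell) for cell in row] for row in confusion])
print(counts)           # [[2 2]
                        #  [1 1]]  i.e. 2 TP, 2 FP, 1 FN, 1 TN
print(confusion[0][0])  # the timestamps of the true positives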
# --- numpy math helpers (file name truncated in the archive dump); the first function is cut
#     off, its surviving tail belongs to a relu with alpha / max_value / threshold handling ---

#       ... * (x >= threshold)
#       above_threshold = np.clip(above_threshold, 0.0, max_value)
#       below_threshold = alpha * (x - threshold) * (x < threshold)
#       return below_threshold + above_threshold

def softplus(x):
    return np.log(1. + np.exp(x))

def softsign(x):
    return x / (1 + np.abs(x))

def elu(x, alpha=1.):
    return x * (x > 0) + alpha * (np.exp(x) - 1.) * (x < 0)

def sigmoid(x):
    return 1. / (1. + np.exp(-x))

def hard_sigmoid(x):
    y = 0.2 * x + 0.5
    return np.clip(y, 0, 1)

def tanh(x):
    return np.tanh(x)

def softmax(x, axis=-1):
    y = np.exp(x - np.max(x, axis, keepdims=True))
    return y / np.sum(y, axis, keepdims=True)

def l2_normalize(x, axis=-1):
    y = np.max(np.sum(x ** 2, axis, keepdims=True), axis, keepdims=True)
    return x / np.sqrt(y)

def in_top_k(predictions, targets, k):
    top_k = np.argsort(-predictions)[:, :k]
    targets = targets.reshape(-1, 1)
    return np.any(targets == top_k, axis=-1)

def binary_crossentropy(target, output, from_logits=False):
    if not from_logits:
        output = np.clip(output, 1e-7, 1 - 1e-7)
        output = np.log(output / (1 - output))

    return (target * -np.log(sigmoid(output)) +
            (1 - target) * -np.log(1 - sigmoid(output)))

def categorical_crossentropy(target, output, from_logits=False):
    if from_logits:
        output = softmax(output)
    else:
        output /= output.sum(axis=-1, keepdims=True)

    output = np.clip(output, 1e-7, 1 - 1e-7)
    return np.sum(target * -np.log(output), axis=-1, keepdims=False)

def max(x, axis=None, keepdims=False):
    if isinstance(axis, list):
        axis = tuple(axis)
    return np.max(x, axis=axis, keepdims=keepdims)

def min(x, axis=None, keepdims=False):
    if isinstance(axis, list):
        axis = tuple(axis)
    return np.min(x, axis=axis, keepdims=keepdims)

def mean(x, axis=None, keepdims=False):
    if isinstance(axis, list):
        axis = tuple(axis)
    return np.mean(x, axis=axis, keepdims=keepdims)

def var(x, axis=None, keepdims=False):
    if isinstance(axis, list):
        axis = tuple(axis)
    return np.var(x, axis=axis, keepdims=keepdims)

def std(x, axis=None, keepdims=False):
    if isinstance(axis, list):
        axis = tuple(axis)
    return np.std(x, axis=axis, keepdims=keepdims)

def logsumexp(x, axis=None, keepdims=False):
    if isinstance(axis, list):
        axis = tuple(axis)
    return sp.special.logsumexp(x, axis=axis, keepdims=keepdims)

def sum(x, axis=None, keepdims=False):
    if isinstance(axis, list):
        axis = tuple(axis)
    return np.sum(x, axis=axis, keepdims=keepdims)

def prod(x, axis=None, keepdims=False):
    if isinstance(axis, list):
        axis = tuple(axis)
    return np.prod(x, axis=axis, keepdims=keepdims)
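# --- usage sketch (illustrative, not from the package): quick numerical checks of the
#     sigmoid / softmax formulas above, using plain numpy only. ---
import numpy as np

x = np.array([[1.0, 2.0, 3.0],
              [0.0, 0.0, 0.0]])

sig = 1. / (1. + np.exp(-np.array([0.0])))   # same formula as sigmoid above
smx = np.exp(x - np.max(x, -1, keepdims=True))
smx = smx / np.sum(smx, -1, keepdims=True)   # same formula as softmax above

assert sig[0] == 0.5                         # a logit of 0 maps exactly to the 0.5 cutoff
assert np.allclose(smx.sum(axis=-1), 1.0)    # softmax rows always sum to one
assert np.allclose(smx[1], 1.0 / 3.0)        # equal logits give a uniform distribution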
# --- model fitting internals (file name truncated in the archive dump); the def line of _fit
#     is reconstructed from its callers, only "-> Tuple[Model, Tuple, Tuple, Tuple]" survives ---

def _fit(df, model_provider, test_size, number_of_cross_validation_splits,
         cache_feature_matrix, test_validate_split_seed,
         summary_printer) -> Tuple[Model, Tuple, Tuple, Tuple]:
    # get a new model
    model = model_provider()
    features_and_labels = model.features_and_labels

    # make the training and test data sets
    x_train, x_test, y_train, y_test, index_train, index_test, min_required_data, names = \
        make_training_data(df, features_and_labels, test_size, int, test_validate_split_seed,
                           cache=cache_feature_matrix, summary_printer=summary_printer)

    log.info(f"create model (min required data = {min_required_data})")
    model.min_required_data = min_required_data

    # fit the model
    start_performance_count = log_with_time(lambda: log.info("fit model"))

    if number_of_cross_validation_splits is not None:
        # cross validation
        cv = KFold(n_splits=number_of_cross_validation_splits)
        folds = cv.split(x_train, y_train)

        for f, (train_idx, test_idx) in enumerate(folds):
            log.info(f'fit fold {f}')
            model.fit(x_train[train_idx], y_train[train_idx], x_train[test_idx], y_train[test_idx])
    else:
        # fit without cross validation
        model.fit(x_train, y_train, x_test, y_test)

    log.info(f"fitting model done in {perf_counter() - start_performance_count: .2f} sec!")

    return model, (x_train, y_train), (x_test, y_test), (index_train, index_test)


def _backtest(df: pd.DataFrame, model: Model) -> Tuple:
    features_and_labels = model.features_and_labels

    # make training and test data with a zero test data fraction
    x, _, y, _, index, _, _, names = make_training_data(df, features_and_labels, 0, int)

    # predict probabilities
    y_hat = model.predict(x)

    return x, y, y_hat, index


def _predict(df: pd.DataFrame, model: Model, tail: int = None) -> pd.DataFrame:
    features_and_labels = model.features_and_labels

    if tail is not None:
        if tail <= 0:
            raise ValueError("tail must be > 0 or None")
        elif model.min_required_data is not None:
            # just use the tail for feature engineering
            df = df[-(tail + (model.min_required_data - 1)):]
        else:
            log.warning("could not determine the minimum required data from the model")

    # then re-assign the data frame with features only
    dff, x, _ = make_forecast_data(df, features_and_labels)

    # first save the target columns and the loss column
    if features_and_labels.target_columns is not None:
        dff = dff.join(df[features_and_labels.target_columns].add_prefix("target_"))

    if features_and_labels.loss_column is not None:
        dff["loss"] = df[features_and_labels.loss_column]

    # predict on the features
    prediction = model.predict(x)
    if len(prediction.shape) > 1 and prediction.shape[1] > 1:
        for i in range(prediction.shape[1]):
            dff[f"prediction_{model.features_and_labels.labels[i]}"] = prediction[:, i]
    else:
        dff["prediction"] = prediction

    return dff


# --- model base classes (file name truncated in the archive dump); the Model class header,
#     its constructor and the item access used as model[("probability_cutoff", 0.5)] elsewhere
#     are cut off; the fit signature below is inferred from SkitModel ---

class Model(object):

    def fit(self, x, y, x_val, y_val) -> None:
        pass

    def predict(self, x) -> np.ndarray:
        pass

    # this lets the model itself act as a provider. However we want to use the same Model
    # configuration for different datasets (i.e. as part of a MultiModel)
    def __call__(self, *args, **kwargs):
        return deepcopy(self)


class SkitModel(Model):

    def __init__(self, skit_model, features_and_labels: FeaturesAndLabels, **kwargs):
        super().__init__(features_and_labels, **kwargs)
        self.skit_model = skit_model

    def fit(self, x, y, x_val, y_val):
        self.skit_model.fit(reshape_rnn_as_ar(x), y)

    def predict(self, x):
        if callable(getattr(self.skit_model, 'predict_proba', None)):
            return self.skit_model.predict_proba(reshape_rnn_as_ar(x))[:, 1]
        else:
            return self.skit_model.predict(reshape_rnn_as_ar(x))


# TODO add a Keras Model
class KerasModel(Model):
    pass


# class MultiModel(Model):
#
#     def __init__(self, model_provider: Callable[[], Model], features_and_labels: FeaturesAndLabels):
#         super().__init__(features_and_labels)
#         self.model_provider = model_provider
#
#     def fit(self, x, y, x_val, y_val) -> None:
#         pass
#
#     def predict(self, x) -> np.ndarray:
#         # we would need to return a prediction for every and each parameters dict in the parameter space
#         pass
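# --- usage sketch (illustrative, not from the package): the predict_proba fallback used by
#     SkitModel.predict above, shown with a plain scikit-learn classifier on toy data. ---
import numpy as np
from sklearn.linear_model import LogisticRegression

x = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])

estimator = LogisticRegression().fit(x, y)

if callable(getattr(estimator, "predict_proba", None)):
    # probability of the positive class, the same [:, 1] column selection as above
    prediction = estimator.predict_proba(x)[:, 1]
else:
    prediction = estimator.predict(x)

print(prediction.shape)  # (4,) - one probability per sample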
# --- regression fitting module (file name truncated in the archive dump); the def line of the
#     first function is cut off, presumably fit_regressor mirroring fit_classifier above;
#     its parameters are read off the body, only "-> Fit" survives ---

def fit_regressor(df, model_provider, test_size, number_of_cross_validation_splits,
                  cache_feature_matrix, test_validate_split_seed, summary_printer) -> Fit:
    model, train, test, index = _fit(df, model_provider,
                                     test_size=test_size,
                                     number_of_cross_validation_splits=number_of_cross_validation_splits,
                                     cache_feature_matrix=cache_feature_matrix,
                                     test_validate_split_seed=test_validate_split_seed,
                                     summary_printer=summary_printer)

    # assemble the result objects
    features_and_labels = model.features_and_labels
    loss = df[features_and_labels.loss_column] if features_and_labels.loss_column is not None else None

    training_summary = RegressionSummary(train[1], model.predict(train[0]), index[0], loss)
    test_summary = RegressionSummary(test[1], model.predict(test[0]), index[1], loss)
    return Fit(model, training_summary, test_summary)


def backtest_regressor(df: pd.DataFrame, model: Model) -> RegressionSummary:
    x, y, y_hat, index = _backtest(df, model)

    features_and_labels = model.features_and_labels
    loss = df[features_and_labels.loss_column if features_and_labels.loss_column is not None else []]

    return RegressionSummary(y, y_hat, index, loss)


def regress(df: pd.DataFrame, model: Model, tail: int = None) -> pd.DataFrame:
    dff = _predict(df, model, tail)

    # get the labels and calculate the error
    error_function = model[("error", _mse)]
    dff["error"] = error_function(df[model.features_and_labels.labels],
                                  dff[[col for col in dff if col.startswith('prediction')]])

    return dff


# --- regression summary module (file name truncated in the archive dump); the class name and
#     the constructor arguments follow from the RegressionSummary(...) calls above ---

class RegressionSummary(object):

    def __init__(self, y_true, y_prediction, index, loss):
        self.y_true = y_true
        # (this assignment is truncated in the dump; it ends in "... 1 else y_prediction",
        #  i.e. a multi column prediction is reduced to a single column)
        self.y_prediction = y_prediction
        self.index = index
        self.loss = loss
        # TODO add some statistics


# --- pandas_ml_utils/wrappers/__init__.py (empty) ---


# --- pandas_ml_utils/wrappers/hashable_dataframe.py ---
import pandas as pd


class HashableDataFrame(object):

    def __init__(self, df: pd.DataFrame) -> None:
        self.df: pd.DataFrame = df

    def __getitem__(self, item: str):
        return self.df.__getitem__(item)

    def __getattr__(self, item):
        return self.df.__getattr__(item)

    def __hash__(self):
        return hash(str(self.describe()))

    def __eq__(self, other):
        try:
            pd.testing.assert_frame_equal(self.df, other.df)
            return True
        except Exception:
            return False


# --- pandas_ml_utils/wrappers/lazy_dataframe.py ---
import uuid

import pandas as pd
from typing import Callable, Union


class LazyDataFrame(object):

    def __init__(self, df: pd.DataFrame, **kwargs: Callable[[pd.DataFrame], Union[pd.DataFrame, pd.Series]]) -> None:
        self.hash = uuid.uuid4()
        self.df: pd.DataFrame = df
        self.kwargs = kwargs

    def __getitem__(self, item: str):
        if isinstance(item, list):
            df = self.df[[value for value in item if value in self.df.columns]]

            for key in item:
                if key in self.kwargs:
                    res = self.kwargs[key](self.df)

                    if isinstance(res, pd.Series):
                        res.name = key
                        df = df.join(res)
                    elif isinstance(res, pd.DataFrame):
                        df = df.join(res.add_prefix(f'{key}_'))

            return df
        else:
            if item in self.df:
                return self.df[item]
            elif item in self.kwargs:
                return self.kwargs[item](self.df)
            else:
                raise ValueError(f"invalid item {item}")

    def __setitem__(self, key: str, value: Callable[[pd.DataFrame], Union[pd.DataFrame, pd.Series]]):
        self.hash = uuid.uuid4()

        if callable(value):
            # store the callable itself; it gets evaluated lazily on access
            self.kwargs[key] = value
        else:
            self.df[key] = value

    def __getattr__(self, item):
        return self.to_dataframe().__getattr__(item)

    def __contains__(self, key):
        return key in self.df or key in self.kwargs

    def __hash__(self):
        return int(self.hash)

    def __eq__(self, other):
        return self.hash == other.hash if isinstance(other, LazyDataFrame) else False

    def to_dataframe(self):
        df = self.df.copy()

        for key, calculation in self.kwargs.items():
            column = calculation(df)

            if isinstance(column, pd.DataFrame):
                df = df.join(column.add_prefix(f'{key}_'))
            else:
                df[key] = column

        return df
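# --- usage sketch (illustrative, not from the package): columns of a LazyDataFrame are only
#     computed when accessed or materialized; the import path mirrors the file name above,
#     the "price" / "sma" columns are invented for the example. ---
import pandas as pd

from pandas_ml_utils.wrappers.lazy_dataframe import LazyDataFrame

df = pd.DataFrame({"price": [1.0, 2.0, 3.0, 4.0, 5.0]})
ldf = LazyDataFrame(df, sma=lambda frame: frame["price"].rolling(2).mean())

print(ldf["price"])        # a plain column, served directly from the wrapped frame
print(ldf["sma"])          # computed on access from the callable registered above
print(ldf.to_dataframe())  # materializes every registered column into a copy of the frame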
# --- pandas_ml_utils-0.0.5.dist-info/LICENSE ---

The MIT License (MIT)

Copyright (c) 2019 KIC

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.