nordypy/__init__.py

""" Nordypy Package """

# templates and initialization
from ._init_methods import initialize_project
from ._init_methods import hello
from ._init_methods import create_config_file
from ._nordstrom_rock_it import rock_it

# s3 functions
from ._s3 import s3_to_redshift
from ._s3 import s3_get_bucket
from ._s3 import s3_delete
from ._s3 import s3_download
from ._s3 import s3_upload
from ._s3 import s3_rename_file
from ._s3 import pandas_to_s3
from ._s3 import s3_to_pandas
from ._s3 import s3_list_objects
from ._s3 import s3_list_buckets
from ._s3 import s3_change_permissions
from ._s3 import s3_get_permissions

# redshift functions
from ._datasource import database_analyze_table
from ._datasource import database_connect
from ._datasource import database_get_data
from ._datasource import database_get_column_names
from ._datasource import redshift_to_s3
from ._datasource import database_create_table
from ._datasource import database_insert
from ._datasource import database_drop_table
from ._datasource import data_to_redshift
from ._datasource import read_sql_file
from ._datasource import database_execute

# dynamo functions
# from .dynamo import dynamo_table_create
# from .dynamo import dynamo_table_write

# knowledge repo functions
from ._knowledge_repo_utils import render_post

__version__ = '1.2.1'

__all__ = ["_datasource",
           "_init_methods",
           "_knowledge_repo_utils",
           "_nordstrom_rock_it",
           "_redshift_utils",
           "_s3",
           ]

nordypy/_command_generation.py

import math
import datetime
import pandas as pd


def _assign_query_group(size='default'):
    """Create query group statement to assign with in your connection."""
    size = size.lower()
    qg_dict = {'small': 'QG_S',
               'medium': 'QG_SM',
               'large': 'QG_SML',
               'default': 'DEFAULT'}
    if size not in qg_dict.keys():
        raise ValueError('Please use one of the following query group sizes: ' + '\n' +
                         'small, medium, large, or default')
    print('Using {} query_group'.format(qg_dict[size]))
    return "set query_group to '{}';".format(qg_dict[size])


def _create_table_statement(data, table_name):
    # determine type of columns in table and generate create table statement
    redshift_dtypes = {'float64': 'FLOAT4',
                       'object': 'VARCHAR',
                       'int64': 'INT',
                       'bool': 'BOOL',
                       'uint8': 'INT',
                       'datetime64[ns]': 'TIMESTAMP',
                       'date': 'DATE'}
    column_data = []
    # distinguish between dates and datetimes since pandas has dates as objects
    for col, dtype in zip(data.columns, data.dtypes):
        if dtype == str('object'):
            if isinstance(data[col].iloc[0], datetime.date):
                dtype = 'date'
        column_data.append((col, redshift_dtypes[str(dtype)]))
    a = "CREATE TABLE " + table_name + ' ('
    b = ''
    for col, dtype in column_data:
        if dtype == 'VARCHAR':
            max_length = data[col].str.len().max()
            max_length = _roundup(max_length * 2)
            if max_length == 0:
                max_length = 10
            if max_length > 60000:
                max_length = 'max'
            dtype = 'VARCHAR(' + str(max_length) + ')'
        b = b + col.replace('.', '_') + ' ' + str(dtype) + ','
    return a + b[:-1] + ');'


def _create_insert_statement(data, table_name):
    # create an insert statement using various data formats
    values = ''
    statement = 'INSERT INTO ' + table_name + ' VALUES '
    if type(data) == str:
        if data.endswith('.csv'):
            data = pd.read_csv(data)
    if type(data) == tuple:
        values = str(data)
        values = values.replace('nan', 'DEFAULT')
        values = values.replace('((', '(')
        values = values.replace('))', ')')
    elif type(data) == dict:
        for key, v in data.items():
            values += str(tuple(v.values()))
    elif type(data) == list:
        for v in data:
            values += str(tuple(v))
    elif type(data) == pd.core.frame.DataFrame:
        data = data.to_records(index=False)
        for i in range(len(data)):
            values += str(data[i])
        values = values.replace('nan', 'DEFAULT')
        values = values.replace(')(', '), (')
    return statement + values + ';'


def _generate_copy_command(copy_command=None, cred_str=None, bucket=None,
                           s3_filepath=None, redshift_table=None,
                           delimiter=None, copy_format=None):
    # fill in redshift schema.table, s3 path and credentials
    # build copy_command from scratch
    if not copy_command:
        copy_command = ["copy " + redshift_table,
                        " from 's3://" + bucket + "/" + s3_filepath + "'",
                        " credentials",
                        " '" + cred_str + "'"]
        if delimiter:
            del_str = " delimiter " + "'{}'".format(delimiter)
            copy_command.append(del_str)
        else:
            copy_command.append(' csv')
        if copy_format:
            copy_command.append(copy_format)
        if '.manifest' in s3_filepath:
            copy_command.append(" COMPUPDATE OFF STATUPDATE OFF")
        else:
            copy_command.append(" COMPUPDATE ON")
        return ''.join(copy_command)
    # just fill in some things on the copy command
    # if copy_command.endswith('.sql'):
    #     copy_command = read_sql_file(copy_command)
    if bucket:
        s3 = "s3://" + bucket + "/" + s3_filepath
        copy_command = copy_command.format(redshift_table, s3, cred_str, delimiter)
        if '.manifest' in s3_filepath:
            copy_command += " COMPUPDATE OFF STATUPDATE OFF"
        else:
            copy_command += " COMPUPDATE ON"
        return copy_command
    # fill in credentials only
    else:
        copy_command = copy_command.format(cred_str)
        return copy_command


def _generate_unload_command(cred_str, select_sql, bucket, s3_filepath, delimiter,
                             parallel, gzip, unload_command, manifest, allowoverwrite):
    # if prebuilt unload_command provided
    if unload_command:
        # if unload_command.endswith('.sql'):
        #     unload_command = read_sql_file(unload_command)
        # assign the formatted string so the credentials actually get filled in
        unload_command = unload_command.format(cred_str)
        return unload_command
    # build unload command
    if 'select' not in select_sql.lower():
        select_sql = 'select * from ' + select_sql
    unload_command = ["unload ('" + select_sql + "') ",
                      "to 's3://" + bucket + "/" + s3_filepath + "'",
                      "credentials '" + cred_str + "'"
                      ]
    if delimiter:
        unload_command.append("delimiter as '" + delimiter + "'")
    if not parallel:
        unload_command.append("PARALLEL OFF")
    if gzip:
        unload_command.append("GZIP")
    if manifest:
        unload_command.append('manifest')
    if allowoverwrite:
        unload_command.append("allowoverwrite")
    unload_command.append(';')
    unload_command = " ".join(unload_command)
    return unload_command


# --- HELPER FUNCTIONS ---
def _roundup(x):
    if math.isnan(x):
        return 10
    return int(math.ceil(x / 100.0)) * 100

nordypy/_datasource.py

import psycopg2
import pymysql
import os, sys
import yaml
from boto3.exceptions import S3UploadFailedError
import pandas as pd

from . import _redshift_utils
from ._s3 import _s3_get_temp_creds
from ._s3 import pandas_to_s3, s3_to_redshift
from ._command_generation import _create_table_statement
from ._command_generation import _create_insert_statement
from ._command_generation import _generate_unload_command
from ._command_generation import _assign_query_group
from ._secret import _get_secret


def database_analyze_table(database_key=None, yaml_filepath=None, table=None, schema=None):
    """Analyze table stats in redshift.

    Parameters
    ----------
    database_key : str
    yaml_filepath : str
    table : str
        table name in redshift, could also be the schema and table together
        ex. table='public.nordypy_test'
    schema : str
        table schema in redshift (ex. public)
    """
    if table is None:
        raise ValueError('Provide a table name: ex. sessions')
    if '.' in table:
        schema, table = table.split('.')
    if schema is None:
        raise ValueError('Provide a table schema: ex. 
public') sql = _redshift_utils.analyze_table.format(schema, table) result = database_get_data(database_key=database_key, yaml_filepath=yaml_filepath, sql=sql, as_pandas=True) return result def database_connect(database_key=None, yaml_filepath=None): """Return a database connection object. Connect with YAML config or bash environment variables. Parameters ---------- database_key : str indicates which yaml login you plan to use of the bash_variable key if no YAML file is provided yaml_filepath : str [optional] path to yaml file to connect if no yaml_file is given, will assume that the database_key is for a bash_variable Returns ------- conn : Database Connection Object Examples -------- # if connection in bash_profile conn = nordypy.database_connect('REDSHIFT') # yaml file with only one profile conn = nordypy.database_connect('config.yaml') # yaml file with multiple profiles conn = nordypy.database_connect('prod_redshift', 'config.yaml') """ if yaml_filepath: try: with open(os.path.expanduser(yaml_filepath), 'r') as ymlfile: cfg = yaml.load(ymlfile) except (OSError, IOError): # if out of order (for both python 2 and 3) temp_filepath = database_key database_key = yaml_filepath with open(os.path.expanduser(temp_filepath), 'r') as ymlfile: cfg = yaml.load(ymlfile) if _dict_depth(cfg) != 1: if database_key: cfg = cfg[database_key] else: raise ValueError( 'YAML file contains multiple datasource profiles. Provide a datasource key.') if 'secret_name' in cfg: if 'region_name' in cfg: cfg = _get_secret(cfg['secret_name'], region_name = cfg['region_name']) else: print("You are trying to access AWS secrets and need region_name.") print("Please specify region_name in config.yaml (e.g., us-west-2)") print("Exiting now.") sys.exit() if 'dbtype' not in cfg: print( "UserWarning: Update config.yaml with 'dbtype' parameter: ['redshift', 'mysql', 'teradata'] -- ") if 'dbtype' in cfg: if cfg['dbtype'] == 'mysql': conn = __mysql_connect(cfg) else: conn = __redshift_connect(cfg) else: # default to redshift database conn = __redshift_connect(cfg) elif database_key: try: try: # redshift conn = psycopg2.connect(os.environ[database_key]) except psycopg2.OperationalError: # mysql conn = pymysql.connect(os.environ[database_key]) except (KeyError, pymysql.err.OperationalError): # for the case where positional arguments were used and the # datasource was actually the YAML path try: conn = database_connect(yaml_filepath='config.yaml', database_key=database_key) except: yaml_filepath = database_key conn = database_connect(yaml_filepath=yaml_filepath, database_key=None) else: raise ValueError('Provide a YAML file path or a connection string via a bash_variable') return conn def __redshift_connect(cfg): return psycopg2.connect(host=cfg['host'], dbname=cfg['dbname'], password=cfg['password'], port=cfg['port'], user=cfg['user'] ) def __mysql_connect(cfg): return pymysql.connect(host=cfg['host'], db=cfg['dbname'], password=cfg['password'], port=cfg['port'], user=cfg['user'] ) def database_drop_table(table_name=None, database_key=None, yaml_filepath=None, conn=None): """ Drop table in redshift if exists. 
Parameters ---------- table_name : str - schema.table database_key (str) - bash or yaml variable yaml_filepath (str) - where if the yaml file conn (psycopg2 connection) Returns ------- None Examples -------- nordypy.datasource_drop_table('public.nordypy_test', 'REDSHIFT') """ drop_statement = 'DROP TABLE IF EXISTS {};'.format(table_name) close_connection = False if not conn: conn = database_connect(database_key=database_key, yaml_filepath=yaml_filepath) close_connection = True cursor = conn.cursor() try: cursor.execute(drop_statement) conn.commit() cursor.close() print('{} table dropped'.format(table_name)) except psycopg2.ProgrammingError as e: print(e) if close_connection: conn.close() return True def database_create_table(data=None, table_name='', make_public=True, database_key=None, yaml_filepath=None, create_statement=None, conn=None): """ Create blank table. Can use premade statement or autogenerate the create statement based on the data input. Parameters ---------- data (dataframe or filepath) - required table_name (str) - schema.tablename to be built in the database make_public (boolean) - should the table be made public database_key (str) - yaml or bash key yaml_filepath (str) [optional] - path and file name of yaml file create_statement (str or filepath) - create sql or sql file conn (psycopg2 Conn) Returns ------- None Examples -------- # automatically from pandas dataframe nordypy.database_create_table(data=df, table_name='schema.my_table', yaml_filepath='~/config.yaml', database_key='REDSHIFT') # using create statement create_statement = 'create table public.nordypy_test (name VARCHAR(100), value INT);' nordypy.database_create_table(create_statement=create_statement, database_key='REDSHIFT') """ close_connection = False if not create_statement: create_statement = _create_table_statement(data, table_name=table_name) if not conn: conn = database_connect(database_key=database_key, yaml_filepath=yaml_filepath) close_connection = True cursor = conn.cursor() try: if create_statement.endswith('.sql'): create_statement = read_sql_file(create_statement) cursor.execute(create_statement) conn.commit() print('{} table created'.format(table_name)) if make_public: grant_sql = 'GRANT ALL ON {} TO PUBLIC;'.format(table_name) cursor.execute(grant_sql) print('Access to {} granted to all'.format(table_name)) conn.commit() cursor.close() except psycopg2.ProgrammingError as e: print(e) if close_connection: conn.close() return True def database_insert(data=None, table_name='', database_key=None, yaml_filepath=None, insert_statement=None, conn=None, query_group=None): """ Insert data into an already existing table. Can insert a full csv, a full pandas dataframe, a single tuple of data, or run an insert statement on tables already in the database. 
Parameters ---------- data (dataframe or filepath) - required table_name (str) - schema.tablename of table in the database database_key (str) - yaml or bash key yaml_filepath (str) [optional] - path and file name of yaml file insert_statement (str or filepath) - insert sql or sql file conn (psycopg2 Conn) - connection query_group (str) - None, 'small', 'medium', 'large' Returns ------- None Examples -------- # from single tuple data = (1, 2 ,3 ,4) # from a tuple of tuples data = ((1, 2, 3, 4), (5, 6, 7, 8)) # from single dictionary data = {'A':1, 'B':2, 'C':3, 'D':4} # from list of dictionaries data = [{'A':1, 'B':2, 'C':3, 'D':4}, {'A':5, 'B':6, 'C':7, 'D':8}] # from a dictionary of dictionaries data = {'bob': {'A':1, 'B':2, 'C':3, 'D':4}, 'sally': {'A':5, 'B':6, 'C':7, 'D':8}} # from a pandas dataframe data = pd.DataFrame([(1,2,3, 4), (5, 6, 7, 8)]) # from csv data = 'dir/file.csv' # from create statement insert_statement = 'insert into schema.my_table (select * from schema.other_table);' """ # convert alternative types to dataframe and create insert_statement if not insert_statement: insert_statement = _create_insert_statement(data, table_name) # execute insert_statement database_execute(database_key=database_key, yaml_filepath=yaml_filepath, sql=insert_statement, conn=conn, query_group=query_group, return_data=False, as_pandas=False) print('Data inserted into table {}'.format(table_name)) return None def database_execute(database_key=None, yaml_filepath=None, sql=None, conn=None, return_data=False, as_pandas=False, query_group=None): """Excecute one or more sql statements. Parameters ---------- database_key : str [REQUIRED] indicates which yaml login you plan to use of the bash_variable key if no YAML file is provided yaml_filepath : str [optional] path to yaml file to connect if no yaml_file is given, will assume that the database_key is for a bash_variable sql : str or filename single or multiple sql statements separated with ";" or filepath to sql file to be executed conn : connection object [optional] can provide a connection, if you want a consistent SQL session return_data : bool indcates that data should be returned from the final query as_pandas : bool if data is returned, should it be as a dataframe query_group : str assign query_group [None, 'default', 'small', 'medium', 'large'] Returns ------- """ close_connection = False if sql.endswith('.sql'): with open(sql, 'r') as infile: sqlcode = infile.read() else: sqlcode = sql if ';' not in sqlcode: raise ValueError("SQL statements must contain ';'") if query_group: sqlcode = _assign_query_group(size=query_group) + sqlcode sql_statements = sqlcode.split(';')[:-1] # get connection if not there if not conn: conn = database_connect(database_key=database_key, yaml_filepath=yaml_filepath) close_connection = True cursor = conn.cursor() if as_pandas is True: # assume if they want a pandas return that data should return too return_data = True if not return_data: for i, stmt in enumerate(sql_statements): cursor.execute(stmt) conn.commit() print('Statement {} finished'.format(i + 1)) cursor.close() print('SQL Execution Finished') if close_connection: conn.close() return True else: if len(sql_statements) > 1: for i, stmt in enumerate(sql_statements[:-1]): cursor.execute(stmt) conn.commit() print('Statement {} finished'.format(i)) if as_pandas: data = pd.read_sql(sql=sql_statements[-1], con=conn) else: data = database_get_data(conn=conn, sql=sql_statements[-1]) print('SQL Execution Finished') if close_connection: conn.close() return data def 
database_get_column_names(database_key=None, yaml_filepath=None, table=None, schema=None, data_type=False): """Determine column names on a particular table. Parameters ---------- database_key : str yaml_filepath : str table : str table name in redshift, could also be the schema and table together ex. table='public.nordypy_test' schema : str table schema in redshift (ex. public) data_type : bool return column type """ if table is None: raise ValueError('Provide a table name: ex. sessions') if '.' in table: schema, table = table.split('.') if schema is None: raise ValueError('Provide a table schema: ex. public') if data_type: sql = _redshift_utils.table_columns.format(', data_type ', schema, table) else: sql = _redshift_utils.table_columns.format('', schema, table) result = database_get_data(database_key=database_key, yaml_filepath=yaml_filepath, sql=sql, as_pandas=True) return result def database_get_data(database_key=None, yaml_filepath=None, sql=None, as_pandas=False, conn=None, query_group=None): """ Helper function to connect to a datasource, run the specified sql statement(s), close the connection and return the result(s). Parameters ---------- database_key : str [REQUIRED] indicates which yaml login you plan to use or the bash_variable key if no YAML file is provided yaml_filepath : str path to yaml file to connect if no yaml_file is given, will assume that the database_key is for a bash_variable sql : str or filename SQL to execute in redshift, can be single string or multiple statements conn : database connection database connection object if you want to pass in an already established connection as_pandas : bool return data as a pandas dataframe query_group : str assign query_group [None, 'default', 'small', 'medium', 'large'] Returns ------- data (Records from redshift) Examples -------- sql = "select top 10 * public.my_table;" # yaml file with multiple profiles data = nordypy.database_get_data('dsa', 'config.yaml', sql) """ # assign sql to proper variable if positional arguments used close_connection = False if not conn: # open a connection if not already if type(yaml_filepath) is str: if len(yaml_filepath.split()) > 1: sql = yaml_filepath yaml_filepath = None conn = database_connect(database_key, yaml_filepath) close_connection = True if as_pandas: if query_group: sql = _assign_query_group(size=query_group) + sql data = pd.read_sql(sql=sql, con=conn) else: cursor = conn.cursor() try: if query_group: query_group_sql = _assign_query_group(size=query_group) cursor.execute(query_group_sql) conn.commit() cursor.execute(sql) data = cursor.fetchall() cursor.close() except psycopg2.ProgrammingError as e: raise (e) if close_connection: conn.close() return data def database_list_tables(schema=None, prefix=None, database_key=None, yaml_filepath=None, conn=None): """List the table names, their size and optionally their row names and variable types.""" pass def database_to_pandas(database_key=None, yaml_filepath=None, sql=None, conn=None, query_group=None): """Convenience wrapper for for database_get_data to pandas function. """ return database_get_data(database_key=database_key, yaml_filepath=yaml_filepath, sql=sql, conn=conn, as_pandas=True, query_group=query_group) def data_to_redshift(data, table_name, bucket, s3_filepath='temp', database_key=None, yaml_filepath=None, copy_command=None, create_statement=None, delimiter=None, drop_table=False, environment=None, region_name='us-west-2', profile_name=None): """ Move data from pandas dataframe or csv to redshift. 
This function will automatically create a table in redshift if none exists, move the files up to s3 and then copy them to redshift from there. If a table already exists, the default behavior is to INSERT into the table. Parameters ---------- data : dataframe or csv filepath [REQUIRED] table_name : str [REQUIRED] schema.tablename to be built in the database bucket : str [REQUIRED] name of S3 bucket to be used s3_filepath : str [required] upload location in s3 database_key : str [REQUIRED] yaml or bash key yaml_filepath : str path and file name of yaml file copy_command : str or filepath copy command or path to copy command, will be generated if None create_statement : str or filepath prebuilt create sql or path to sql file, will be generated if None delimiter : None or str, default='|', ',', '\t' data delimiter drop_table : bool drop table if exists? environment : str, 'aws', 'local' where is this running? region_name : str profile_name : str Returns ------- None Examples -------- nordypy.data_to_redshift(data=df, table_name='public.my_table', bucket='my_bucket', s3_filepath='my_data/data_', database_key='REDSHIFT') """ if drop_table: database_drop_table(table_name=table_name, database_key=database_key, yaml_filepath=yaml_filepath) database_create_table(data=data, table_name=table_name, database_key=database_key, yaml_filepath=yaml_filepath, create_statement=create_statement) else: try: schema, name = table_name.split('.') except ValueError: schema = 'public' name = table_name table_exists = "select exists(select * from information_schema.tables where table_schema='{}' and table_name='{}');".format( schema, name) if not database_get_data(database_key=database_key, yaml_filepath=yaml_filepath, sql=table_exists, query_group='small')[0][0]: database_create_table(data=data, table_name=table_name, database_key=database_key, yaml_filepath=yaml_filepath, create_statement=create_statement) # upload data to s3 pandas_to_s3(data=data, delimiter=delimiter, bucket=bucket, s3_filepath=s3_filepath, environment=environment, profile_name=profile_name) # copy from s3 to redshift s3_to_redshift(copy_command=copy_command, database_key=database_key, yaml_filepath=yaml_filepath, environment=environment, bucket=bucket, s3_filepath=s3_filepath, redshift_table=table_name, delimiter=delimiter, region_name=region_name, profile_name=profile_name) print('Data upload to Redshift via S3') return None def read_sql_file(sql_filename=None): """ Read in a SQL file as a string. 
Parameters ---------- sql_filename : filename [REQUIRED] relative path to sql file Returns ------- string containing the sql script Examples -------- >>> sql = nordypy.read_sql_file('../SQL/myquery.sql') """ with open(sql_filename) as sql_file: sql = sql_file.read() return sql def redshift_to_redshift(yaml_filepath=None, database_key_from=None, database_key_to=None, select_sql=None, query_group=None, to_redshift_table=None, bucket=None, s3_filepath=None, unload_command=None, environment=None, region_name='us-west-2', profile_name=None, delimiter='|', parallel=True, gzip=True, manifest=False, allowoverwrite=True): """Move data from one redshift database to another.""" try: redshift_to_s3(yaml_filepath=yaml_filepath, database_key=database_key_from, select_sql=select_sql, query_group=query_group, bucket=bucket, s3_filepath=s3_filepath, environment=environment, region_name=region_name, profile_name=profile_name, unload_command=unload_command, delimiter=delimiter, parallel=parallel, gzip=gzip, manifest=manifest, allowoverwrite=allowoverwrite) except S3UploadFailedError as e: raise (e) try: s3_to_redshift() except: pass def redshift_to_s3(database_key=None, yaml_filepath=None, select_sql=None, query_group=None, conn=None, bucket=None, s3_filepath=None, environment=None, region_name='us-west-2', profile_name=None, unload_command=None, delimiter='|', parallel=True, gzip=None, manifest=False, allowoverwrite=True): """ Select data from redshift and move to s3. You can provide your own unload command or have one generated for you. When the file gets written to S3, a 3-digit code will be appended ex. 000 This is so it doesn't overwrite other files. Parameters ---------- database_key : str [REQUIRED] bash or yaml variable yaml_filepath : str [REQUIRED] where if your yaml file select_sql : str or filename [REQUIRED] selection sql statement or just the table_name if you want to select * from table bucket : str [REQUIRED] s3 bucket to move things to s3_filepath : str [REQUIRED] - s3 file location in bucket query_group : str assign query_group [None, 'default', 'small', 'medium', 'large'] conn : database connection database connection object if you want to pass in an already established connection environment : str where if the script running region_name : str where in AWS profile_name : str default 'nordstrom-federated' unload_command : str or filename unload sql delimiter : ('|', ',' or '\t') delimiter character for unloading parallel : bool unload into multiple files (parallel=True) or single file (False) gzip : bool apply compression manifest : bool include manifest when unloading Returns ------- None Examples -------- nordypy.redshift_to_s3(select_sql='public.nordypy_test', database_key=key, environment=env, bucket=bucket, s3_filepath='my_data/latest.csv') """ close_connection = False cred_str = _s3_get_temp_creds(region_name=region_name, environment=environment, profile_name=profile_name) unload_command = _generate_unload_command(cred_str, select_sql, bucket, s3_filepath, delimiter, parallel, gzip, unload_command, manifest, allowoverwrite) if not conn: # open a connection if not already if type(yaml_filepath) is str: if len(yaml_filepath.split()) > 1: sql = yaml_filepath yaml_filepath = None conn = database_connect(database_key, yaml_filepath) close_connection = True cursor = conn.cursor() if query_group: query_group_sql = _assign_query_group(size=query_group) cursor.execute(query_group_sql) conn.commit() cursor.execute(unload_command) conn.commit() cursor.close() if close_connection: conn.close() 
    print('Redshift data unloaded to S3')
    return None


# ------- HELPER FUNCTIONS --------
def _dict_depth(d):
    if isinstance(d, dict):
        return 1 + (max(map(_dict_depth, d.values())) if d else 0)
    return 0

nordypy/_init_methods.py

import os
import pkg_resources
from shutil import copyfile
from nordypy import _nordstrom_rock_it
import yaml


def _unpack_folder_structure(structure):
    """Private function that determines the folder structure from a given
    template in the package_resources part of nordypy."""
    high_level = [folder for folder in structure if '/' not in folder]
    low_level = {}
    for folder in structure:
        if '/' in folder:
            directory, subdirectory = folder.split('/')
            if directory in low_level:
                low_level[directory].append(subdirectory)
            else:
                low_level[directory] = [subdirectory]
    return high_level, low_level


def create_config_file(path='./', ask_me=False):
    '''
    Write config.yaml.

    Takes user input and generates a config.yaml file in the directory indicated.

    path : str
        path to send config file
    ask_me : bool
        If true, function will prompt user for details and build the config file.
        If false, function copies config file from
        nordypy/package_resources/assets/default/config.yaml
    '''
    filename = path + 'config.yaml'
    if ask_me:
        print(' --- Create your config.yaml by answering a series of questions: ---')
        print(" --- NOTE: Do NOT wrap your answers with '' ")

        def get_params():
            database = input('YAML database_key (ex. dsa): ')
            secret = input('Do you have an AWS secret name? (y/n): ')
            if secret in ['y', 'Y', 'yes', 'Yes', 'True']:
                secret_name = input('Secret name: ')
                region_name = input('Region name (default = us-west-2): ')
                if region_name == '':
                    region_name = 'us-west-2'
                blob = {database: {'secret_name': secret_name,
                                   'region_name': region_name}}
            else:
                host = input('Host: ')
                user = input('Username: ')
                password = input('Password: ')
                dbtype = input('Database type (redshift, mysql or teradata): ').lower()
                while dbtype not in ['redshift', 'mysql', 'teradata']:
                    dbtype = input('Try again: dbtype can be either redshift, mysql or teradata: ')
                if dbtype == 'redshift' or dbtype == 'mysql':
                    port = input('Port (ex. 5439): ')
                    while not port.isdigit():
                        port = input('Port must be an integer (ex. 5439): ')
                    dbname = input('dbname (ex. analytics_prd): ')
                    blob = {database: {'host': host,
                                       'port': int(port),
                                       'dbname': dbname,
                                       'user': user,
                                       'password': password,
                                       'dbtype': dbtype}}
                elif dbtype == 'teradata':
                    use_ldap = input('Use Ldap (true/false):')
                    blob = {database: {'host': host,
                                       'user': user,
                                       'password': password,
                                       'use_ldap': use_ldap,
                                       'dbtype': dbtype}}
                else:
                    blob = {}
            return blob

        i = 1
        config_dict = {}
        while i == 1:
            config_dict.update(get_params())
            add_another = input('Add another database? (y/n): ')
            print('\n')
            if add_another in ['n', 'N', 'No', 'no', 'False']:
                i = 0
        if os.path.isfile(filename):
            # check if file exists already
            overwrite = input('File already exists at {}, overwrite? (y/n): '.format(filename))
            if overwrite in ['y', 'Y', 'yes', 'Yes', 'True']:
                pass
            else:
                # change filename to not overwrite old one
                newfilename = input('New filename? ex. updated_config.yaml: ')
                filename = path + newfilename
        with open(filename, 'w') as f:
            yaml.dump(config_dict, f, default_flow_style=False)
        print('Thanks! Config file {0} created.'.format(filename))
    else:
        if os.path.isfile(filename):
            overwrite = input('File already exists at {}, overwrite? (y/n): '.format(filename))
            if overwrite in ['y', 'Y', 'yes', 'Yes', 'True']:
                copyfile(pkg_resources.resource_filename('nordypy',
                         'package_resources/assets/default/config.yaml'), filename)
                print('Thanks! Config file {0} created.'.format(filename))
            else:
                # change filename to not overwrite old one
                print('File already exists at fake/config.yaml, please remove.')


def display_available_templates():
    """Helper function to display available templates"""
    print('Available Folder Structures: ')
    print("-- default, marketing\n")


def initialize_project(structure=None, create_folders=True, create_files=True,
                       path='.', ask_me=False):
    """Create standard folder structure for analytics and data science projects.

    Generate template files as well.

    Parameters
    ----------
    structure : str
        key to the type of structure to create, ex. 'default', 'marketing/default'
    create_folders : bool
        create folder tree
    create_files : bool
        create template files for project
    path : str
        relative path where files and folders should be built from

    Returns
    -------
    True

    Examples
    --------
    >>> nordypy.initialize_project('default')
    """
    yaml_root = 'package_resources/assets/{}/folder_structure.yaml'
    if not structure:
        print('Please provide a valid folder structure to be used.\n')
        display_available_templates()
        structure = str(input('Which structure would you like?: '))
    if type(structure) is not str:
        raise ValueError('Structure argument must be of type str')
    if '/' in structure:
        asset, structure = structure.split('/')
        yaml_location = yaml_root.format(asset)
    else:
        # if only a single key is given
        yaml_location = yaml_root.format(structure)
        if pkg_resources.resource_exists('nordypy', yaml_location):
            structure = 'default'
        else:
            yaml_location = yaml_root.format('default')
    if path != '.':
        current_dir = os.getcwd()
        try:
            os.chdir(path)
        except OSError as e:
            # handles both python 2 and 3
            raise (e)
    # load up official structures
    yaml_location = pkg_resources.resource_filename('nordypy', yaml_location)
    with open(yaml_location, 'r') as ymlfile:
        cfg = yaml.load(ymlfile)
    files = cfg[structure]['files']  # files to build
    structure = cfg[structure]['structure']  # folder structure to build
    high_level, low_level = _unpack_folder_structure(structure)
    if create_folders:
        for directory in high_level:
            if not os.path.exists(directory):
                os.makedirs(directory)
        for directory in low_level.keys():
            for subdirectory in low_level[directory]:
                subdirectory = str(directory) + '/' + subdirectory
                if not os.path.exists(subdirectory):
                    os.makedirs(subdirectory)
    if create_files:
        for file in files:
            print(file['name'])
            if not os.path.isfile(file['name']):
                if (file['name'] == 'config.yaml') & (ask_me is True):
                    os.chdir(current_dir)
                    create_config_file(path=path, ask_me=True)
                    os.chdir(path)
                else:
                    copyfile(pkg_resources.resource_filename('nordypy',
                             'package_resources/' + file['file']), file['name'])
                print('Created {} file'.format(file['name']))
    print('Ready to Rock!!!!')
    _nordstrom_rock_it.rock_it('small')
    if path != '.':
        os.chdir(current_dir)
    return True


def _undo_create_project(really=False, path='.'):
    """Private function to remove files and folders generated on initialization.

    Parameters
    ----------
    really : bool
        do you really want to do this?
    path : filepath
        the root directory

    Returns
    -------
    bool
    """
    if not really:
        print('Do you really want to remove files and folders?')
        return really
    if path != '.':
        current_dir = os.getcwd()
        os.chdir(path)
    if os.path.isfile('.gitignore'):
        os.remove('.gitignore')
    if os.path.isfile('README.md'):
        os.remove('README.md')
    if os.path.isfile('config.yaml'):
        os.remove('config.yaml')
    for directory in ['code/python', 'code/R', 'code/SQL', 'code', 'sandbox',
                      'data', 'logs', 'output', 'docs']:
        if os.path.exists(directory):
            os.rmdir(directory)
    if path != '.':
        os.chdir(current_dir)
    return really


def hello():
    print("Hi, I'm the Nordypy package and I'm here to help you!")

nordypy/_knowledge_repo_utils.py

import os
import warnings
import datetime
import re
from ._s3 import s3_upload, s3_get_bucket

# TODO: handle .ipynb files


def _post_meta_data(file_to_render=None, post_title=None, post_description=None,
                    post_category=None, post_tags=[]):
    """
    Creates a dictionary of post information to reformat markdown file,
    this is a private function called by the render_post function.

    Parameters
    ----------
    file_to_render : str
        markdown file that you'd like to format for the Knowledge Repo
    post_title : str
        title for Knowledge Repo post
    post_description : str
        description for Knowledge Repo post
    post_category : str
        which category the post should be under in the Knowledge Repo.
        The category must be one of the following four options: "digital",
        "marketing", "supply_chain", or "corporate_analytics".
    post_tags : list of strings
        optional character vector containing tags to attach to the Knowledge
        Repo post. The tags provided are both searchable and browsable on the
        Knowledge Repo.

    Returns
    -------
    A dictionary of meta data for the markdown file to be rendered
    """
    # check args
    if not file_to_render:
        raise ValueError('No file path for file_to_render.')
    if os.path.splitext(file_to_render.lower())[1] != '.md':
        raise ValueError('file_to_render must have a markdown file extension (.md)')
    if not post_title:
        raise ValueError('No post_title given.')
    if not post_description:
        raise ValueError('No post_description given.')
    if post_category not in ['digital', 'marketing', 'supply_chain', 'corporate_analytics']:
        raise ValueError('The post_category must be one of the following four options: '
                         '"digital", "marketing", "supply_chain", or "corporate_analytics".')
    if not post_tags:
        warnings.warn('No post_tags given.')
    elif type(post_tags) is not list:
        raise TypeError('post_tags must be a list.')
    else:
        post_tags = [tag.lower().replace(' ', '-') for tag in post_tags]
    # create meta data dictionary
    current_date = datetime.datetime.now().strftime('%Y-%m-%d')
    meta_data = {
        'file_to_render': file_to_render,
        'post_title': post_title,
        'post_description': post_description,
        'post_category': post_category,
        'post_tags': post_tags,
        'extension': os.path.splitext(file_to_render)[1],
        'path_to_file': os.path.split(file_to_render)[0],
        'yaml_lines': '\n'.join([
            "---",
            "title: {0}".format(post_title),
            "description: {0}".format(post_description),
            "categories: {0}".format(post_category),
            "tags: {0}".format((', ').join(post_tags)),
            "---"
        ]),
        'rendered_file': current_date + '-' + post_title.replace(' ', '-') + '.md'
    }
    return meta_data


def _image_path_sub(base_text, image_dict):
    """
    Substitutes any local image references in the markdown file with their
    corresponding s3 location. This is a private function called by the
    render_post function.

    Parameters
    ----------
    base_text : str
        original markdown contents
    image_dict : dict
        image dictionary with original refs as keys and s3 refs as values

    Returns
    -------
    A string with the new markdown contents
    """
    for key, val in image_dict.items():
        base_text = base_text.replace(key, val)
    return base_text


def render_post(bucket, file_to_render=None, post_title=None, post_description=None,
                post_category=None, post_tags=[], output_path=None):
    """
    Reformats markdown file to be added to Knowledge Repo including adding a
    yaml header, uploading any images to s3, and replacing local image refs
    with s3 location.

    Parameters
    ----------
    file_to_render : str [REQUIRED]
        markdown file that you'd like to format for the Knowledge Repo
    post_title : str [REQUIRED]
        title for Knowledge Repo post
    post_description : str [REQUIRED]
        description for Knowledge Repo post
    post_category : str [REQUIRED]
        category the post should be under in the Knowledge Repo. The category
        must be one of the following four options: "digital", "marketing",
        "supply_chain", or "corporate_analytics"
    post_tags : list of strings
        optional character vector containing tags to attach to the Knowledge
        Repo post. The tags provided are both searchable and browsable on the
        Knowledge Repo (e.g. ['personas', 'segmentation', 'clustering', 'python'])
    output_path : str
        desired location of rendered file relative to location where you are
        running this function. Default location is the same path as the
        original markdown.

    Returns
    -------
    None
    """
    if output_path:
        if not os.path.exists(output_path):
            raise ValueError('output_path is not a valid file location')
    if not bucket:
        raise ValueError('Please specify an S3 bucket')
    s3_bucket = bucket
    # get post meta data
    meta_data = _post_meta_data(file_to_render, post_title, post_description,
                                post_category, post_tags)
    s3_image_url_prefix = 'https://s3-us-west-2.amazonaws.com/' + s3_bucket
    current_date = datetime.datetime.now().strftime('%Y-%m-%d')
    image_dict = {}
    with open(meta_data['file_to_render']) as old_file:
        for line in old_file:
            # findall image paths in each line (markdown or html format)
            md = re.findall(r'[!]\[.*?\]\((["\'-\/.\\\s\w]+)\)', line)
            html = re.findall(r'>

    >> nordypy.make_one_pager(author='Chuan Chen', jira='2037')
    """
    with open(pkg_resources.resource_filename('nordypy',
              'nordstrom_package_resources/one-pager.md')) as f:
        template = f.read()
    if date == 'today':
        date = str(datetime.datetime.today().date())
    if jira:
        jira_dict = {'story': []}
        for story in jira:
            jira_dict['story'].append({'jira': story})
    context = {'title': title, 'author': author, 'author_email': author_email,
               'reviewer': reviewer, 'reviewer_email': reviewer_email, 'date': date,
               'stakeholder': stakeholder, 'story': jira_dict}
    template = pystache.render(template, context)
    with open('one-pager.md', 'w') as outfile:
        outfile.write(template)
    print('One Pager Built')

nordypy/package_resources/assets/default/.gitignore

# YAML stuff
*.yaml

# History files
.Rhistory
.Rapp.history

# Session Data files
.RData

# Example code in package build process
*-Ex.R

# Output files from R CMD build
/*.tar.gz

# Output files from R CMD check
/*.Rcheck/

# RStudio files
.Rproj.user/

# produced vignettes
vignettes/*.html
vignettes/*.pdf

# OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3
.httr-oauth

# knitr and R markdown default cache directories
/*_cache/
/cache/

# Temporary files created by R markdown
*.utf8.md
*.knit.md

# Python Files to Ignore
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
venv*
env*

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

# PyCharm
.idea/

nordypy/package_resources/assets/default/README.md

## Name of the project
At the top of the file there should be a short introduction and/or overview that explains **what** the project is.

#### Project status
List the project status (keep one of the below bullets):

* Completed with active maintenance
* Completed with no active maintenance
* In progress
* On hold
* Under review
* Just started

## Motivation
A short description of the motivation behind the creation and maintenance of the project. This should explain **why** the project exists.

## Confluence
If a confluence page exists for the project, link it here.

## Requirements
If your code requires environment variables to be set, access to specific databases, or requires certain software to be available, mention those here. Additionally, if there is a requirements.txt or .Rprofile needed, list that here.

## Getting started
Provide code examples and explanations of how to get the project up and running.

## Example use and output
Show what the library or code does as concisely as possible; the reader should be able to figure out **how** your project solves their problem by looking at the code example. Make sure the API you are showing off is obvious, and that your code is short and concise.

## Tests
If your project includes tests, describe and show how to run them with code examples.

## Reference notes
Depending on the size of the project, if it is small and simple enough the reference docs can be added to the README. For medium size to larger projects include the documentation within the docs/ subfolder of this repo template.

## Contributors
Let the reader know who to reach out to with questions by listing the main contributors.

* Contributor name 1 (email1@nordstrom.com)
* Contributor name 2 (email2@nordstrom.com)

Also let people know how they can dive into the project, include important links to things like issue trackers if applicable.
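The config.yaml template that follows pairs with the connection helpers in `_datasource.py` above. As a point of reference, here is a minimal, hypothetical usage sketch based only on the docstrings in this package; the `dsa` profile name, the config path, and the SQL are placeholder values, not files shipped in the archive.

    # Hypothetical usage sketch based on the nordypy docstrings above.
    # The 'dsa' profile, config path, table, and SQL are placeholder examples.
    import nordypy

    # config.yaml is assumed to contain a 'dsa' profile with host, port,
    # dbname, user, password, and dbtype filled in.
    conn = nordypy.database_connect(database_key='dsa', yaml_filepath='config.yaml')

    # run a single query and return the result as a pandas DataFrame
    df = nordypy.database_get_data(sql='select top 10 * from public.my_table;',
                                   conn=conn,
                                   as_pandas=True)
    conn.close()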
nordypy/package_resources/assets/default/config.yaml

:
    host:
    port:
    dbname:
    user:
    password:
    dbtype:

nordypy/package_resources/assets/default/folder_structure.yaml

default: # standard folder structure for digital analytics and data science
  structure: ['code', 'code/R', 'code/SQL', 'code/python', 'sandbox', 'data',
              'logs', 'output', 'docs']
  files: [{'file': 'assets/default/.gitignore', 'name': '.gitignore'},
          {'file': 'assets/default/README.md', 'name': 'README.md'},
          {'file': 'assets/default/config.yaml', 'name': 'config.yaml'}]

nordypy/package_resources/assets/default/therock.txt

[ASCII art of The Rock, ending with the banner: --------- // NORDYPY // ---------]

nordypy/package_resources/assets/default/therock25.txt

[Smaller ASCII art of The Rock, ending with the banner: --- // NORDYPY // ---]

nordypy/package_resources/document_templates/default/one_page.md

---
title: "{{title}}"
author: "{{author}}"
author_email: "{{author_email}}"
reviewer_name: {{#reviewer}}"{{reviewer}}"{{/reviewer}}
reviewer_email: {{#reviewer_email}}"{{reviewer_email}}"{{/reviewer_email}}
date: "{{date}}"
stakeholder: {{#stakeholder}}"{{stakeholder}}"{{/stakeholder}}
jira_story:
  - {{#story}}"{{jira}}"{{/story}}
---

**Objective:**

**Background:**

**Approach:**

**Findings:**

**Recommendations:**

nordypy-1.2.1.dist-info/LICENSE

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: You must give any other recipients of the Work or Derivative Works a copy of this License; and You must cause any modified files to carry prominent notices stating that You changed the files; and You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS
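Beyond single queries, the `_datasource.py` module above chains table creation, `pandas_to_s3`, and `s3_to_redshift` behind one call, `data_to_redshift`. The following sketch of that flow is hypothetical and follows the function's own docstring example; the bucket, table, S3 key prefix, and `REDSHIFT` profile name are placeholders.

    # Hypothetical sketch of the pandas -> S3 -> Redshift path exposed by
    # nordypy.data_to_redshift; bucket, table, and profile names are placeholders.
    import pandas as pd
    import nordypy

    df = pd.DataFrame({'name': ['a', 'b'], 'value': [1, 2]})

    # Creates public.nordypy_test if it does not already exist, stages the frame
    # in S3 under the given key prefix, then COPYs it into Redshift.
    nordypy.data_to_redshift(data=df,
                             table_name='public.nordypy_test',
                             bucket='my-bucket',
                             s3_filepath='temp/nordypy_test_',
                             database_key='REDSHIFT',
                             yaml_filepath='config.yaml')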