===== datapungi_fed/__init__.py =====

'''datapungi_fed: gets data from the Federal Reserve (FED) by connecting to its API.'''

import pandas
import requests
import sys

from datapungi_fed.api import *
import datapungi_fed.tests as tests

__version__ = '0.1.0'


class topCall(sys.modules[__name__].__class__):
    '''Make the module itself callable: datapungi_fed('GDP') delegates to data().'''
    def __call__(self, *args, **kwargs):
        coreClass = data()
        return(coreClass(*args, **kwargs))


sys.modules[__name__].__class__ = topCall


===== datapungi_fed/api.py =====

import pandas as pd
import requests
import sys

from datapungi_fed import generalSettings
from datapungi_fed import drivers


class data():
    '''
    Provides an environment where the shared data needed to establish a connection
    is loaded, and serves as a one-stop shop listing all available drivers.

    :param connectionParameters: a dictionary with at least 'key' and 'url', eg
        {'key': 'your key', 'description': 'FED data', 'url': ''}
    :param userSettings: settings saved in the package pointing to a yaml/json/env
        entry containing the connection parameters
    '''

    def __init__(self, connectionParameters={}, userSettings={}):
        self.__connectInfo = generalSettings.getGeneralSettings(
            connectionParameters=connectionParameters, userSettings=userSettings)
        self._metadata = self.__connectInfo.packageMetadata
        self._help = self.__connectInfo.datasourceOverview
        # load drivers:
        loadInfo = {'baseRequest': self.__connectInfo.baseRequest,
                    'connectionParameters': self.__connectInfo.connectionParameters}
        self.datasetlist = drivers.datasetlist(**loadInfo)
        self.categories = drivers.categories(**loadInfo)
        self.releases = drivers.releases(**loadInfo)
        self.series = drivers.series(**loadInfo)
        self.sources = drivers.sources(**loadInfo)
        self.tags = drivers.tags(**loadInfo)

    def __call__(self, *args, **kwargs):
        return(self.series(*args, **kwargs))

    def __str__(self):
        return('\nList of drivers and their shortcuts')

    def _clipcode(self):
        try:
            self._lastCalledDriver.clipcode()
        except AttributeError:
            print('Get data using a driver first, eg: data.series("GDP", verbose=True)')

    def _docDriver(self, driverName, printHelp=True):
        '''
        Given a driver name, return the __doc__ of its main method.
        (Not implemented yet.)
        '''
        return('')


if __name__ == '__main__':
    d = data()
    print(d)
    print(d.datasetlist())
    print(d.categories(125))
    print(d.releases())
    print(d.series('GDP'))
    print(d('GNP'))
    print(d.sources('1'))
    print(d.tags(tag_names='monetary+aggregates;weekly'))
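
A typical session built on the `data` class above might look like the sketch below. This is not part of the package; it assumes a valid FRED API key is already configured (eg, in a `FED` environment variable, matching the default config/userSettings.json shown later in this dump):

    import datapungi_fed as dpf

    data = dpf.data()                        # loads the key/url via generalSettings
    df = data.series('GDP')                  # default series query: series/observations
    print(df.head())
    out = data.series('GDP', verbose=True)   # dict with 'dataFrame', 'request' and 'code'
    print(out['code'])                       # requests code that replicates the query
    print(dpf('GNP').head())                 # the module itself is callable via topCall
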
===== datapungi_fed/driverCore.py =====

'''
Base driver class
'''
import pandas as pd
import requests
import json
from copy import deepcopy
import pyperclip
import math
import re
import inspect
import yaml
import itertools
from datetime import datetime
import warnings
import functools

from . import generalSettings  # NOTE: projectName
from . import utils            # NOTE: projectName


class driverCore():
    def __init__(self, dbGroupName='', defaultQueryFactoryEntry='',
                 baseRequest={}, connectionParameters={}, userSettings={}):
        self._connectionInfo = generalSettings.getGeneralSettings(
            connectionParameters=connectionParameters, userSettings=userSettings)
        self._baseRequest = self._getBaseRequest(baseRequest, connectionParameters, userSettings)
        self._lastLoad = {}      # data stored here to assist functions such as clipcode
        self._queryFactory = {}  # specific drivers will populate this.
        # specific to FED data:
        if dbGroupName:
            self.dbGroupName = dbGroupName
            self.dbParams = self._dbParameters(self.dbGroupName)
            self.queryFactory = {dbName: self._selectDBQuery(self._query, dbName)
                                 for dbName in self.dbParams.keys()}
            # the entry in queryFactory that __call__ will use:
            self.defaultQueryFactoryEntry = defaultQueryFactoryEntry

    def _queryApiCleanOutput(self, urlPrefix, dbName, params, warningsList, warningsOn, verbose):
        '''
        Core steps of querying and cleaning data.  Specific data cleaning should be
        implemented in the specific driver classes.

        Args:
            urlPrefix (str) - appended to the base request url (eg, https://.../urlPrefix?)
            dbName (str) - the database being queried
            params (dict) - usually empty; its entries override any query params
            warningsList (list) - the events that can lead to warnings
            warningsOn (bool) - turn driver warnings on/off
            verbose (bool) - detailed or short output
        '''
        # get data
        query = self._getBaseQuery(urlPrefix, dbName, params)
        retrivedData = requests.get(**query)
        # clean data
        df_output = self._cleanOutput(dbName, query, retrivedData)
        # print a warning if there is more data than the download limit
        for entry in warningsList:
            self._warnings(entry, retrivedData, warningsOn)
        # short or detailed output; update the _lastLoad attribute:
        output = self._formatOutputupdateLoadedAttrib(query, df_output, retrivedData, verbose)
        return(output)

    def _query(self, dbName, params={}, file_type='json', verbose=False, warningsOn=True):
        '''
        Query a database of the group and clean the output.

        Args:
            dbName, params, file_type, verbose, warningsOn
        '''
        warningsList = ['countPassLimit']  # warn on these events.
        prefixUrl = self.dbParams[dbName]['urlSuffix']
        output = self._queryApiCleanOutput(prefixUrl, dbName, params,
                                           warningsList, warningsOn, verbose)
        return(output)

    def __getitem__(self, dbName):
        return(self.queryFactory[dbName])

    def __call__(self, *args, **kwargs):
        out = self.queryFactory[self.defaultQueryFactoryEntry](*args, **kwargs)
        return(out)

    def _getQueryArgs(self, dbName, *args, **kwargs):
        '''
        Map args and kwargs to driver args.
        '''
        # parameters to be passed to a requests query:
        paramArray = self.dbParams[dbName]['params']
        params = dict(zip(paramArray, args))
        paramsAdd = {key: val for key, val in kwargs.items() if key in paramArray}
        params.update(paramsAdd)
        # non-query options (eg, verbose)
        otherArgs = {key: val for key, val in kwargs.items() if key not in paramArray}
        return({**{'params': params}, **otherArgs})

    def _selectDBQuery(self, queryFun, dbName):
        '''
        Fix a generic query to a dbName query: creates a lambda that builds a
        query of dbName from args/kwargs.
        '''
        fun = functools.partial(queryFun, dbName)
        lfun = lambda *args, **kwargs: fun(**self._getQueryArgs(dbName, *args, **kwargs))
        # add quick user tips
        lfun.options = self.dbParams[dbName]['params']
        return(lfun)
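
    # Example sketch (comments only, since this sits inside the class) of how a call
    # is dispatched: d = series(); d['observations']('GDP', limit='10') works as:
    #   1. __getitem__ returns queryFactory['observations'], the lambda built above;
    #   2. _getQueryArgs zips ('GDP',) onto the yaml parameter list -> {'series_id': 'GDP'},
    #      merges limit='10' (a known param) and routes unknown kwargs (eg verbose) separately;
    #   3. the functools.partial runs _query('observations', params={...}, verbose=...).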
    def _cleanOutput(self, api, query, retrivedData):
        '''
        Placeholder - specific drivers should implement their own cleaning method.
        '''
        return(retrivedData)

    def _getBaseQuery(self, urlPrefix, dbName, params):
        '''
        Return a dictionary of request arguments.

        Args:
            urlPrefix (str) - string appended to the core url (eg, series -> http:...\series?)
            dbName (str) - (specific to datapungi_fed) the name of the database (eg, categories)
            params (dict) - request parameters that override all other given parameters

        Returns:
            query (dict) - a dictionary with 'url' and 'params' (a string) to be
            passed to a request
        '''
        query = deepcopy(self._baseRequest)
        # update the query url
        query['url'] = query['url'] + urlPrefix
        # update the base query with the passed parameters, then flatten 'params'
        # into a string so that symbols FED expects verbatim (eg, + and ; in
        # tag_names) are not url-encoded by requests:
        query['params'].update(params)
        query['params'] = '&'.join(str(entry[0]) + "=" + str(entry[1])
                                   for entry in query['params'].items())
        return(query)

    def _getBaseRequest(self, baseRequest={}, connectionParameters={}, userSettings={}):
        '''
        Write a base request.  This is the information used in most requests,
        such as the user key.
        '''
        if baseRequest == {}:
            connectInfo = generalSettings.getGeneralSettings(
                connectionParameters=connectionParameters, userSettings=userSettings)
            return(connectInfo.baseRequest)
        else:
            return(baseRequest)
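    # Example sketch of _getBaseQuery output (all values are placeholders):
    #   _getBaseQuery('series/observations', 'observations', {'series_id': 'GNP'}) ->
    #   {'url': 'https://api.stlouisfed.org/fred/series/observations',
    #    'params': 'api_key=YOURKEY&file_type=json&series_id=GNP'}
    # requests.get(**query) appends a pre-flattened string query verbatim, which is
    # why the flattening above keeps ';' and '+' intact for FED.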
    def _formatOutputupdateLoadedAttrib(self, query, df_output, retrivedData, verbose):
        if verbose == False:
            self._lastLoad = df_output
            return(df_output)
        else:
            code = self._getCode(query, self._connectionInfo.userSettings, self._cleanCode)  # fix: was a bare _getCode call
            output = dict(dataFrame=df_output, request=retrivedData, code=code)
            self._lastLoad = output
            return(output)

    def _dbParameters(self, dbGroupName=''):
        '''
        The parameters of each database in the group (empty by default).
        '''
        dataPath = utils.getResourcePath('/config/datasetlist.yaml')
        with open(dataPath, 'r') as yf:
            datasetlist = yaml.safe_load(yf)
        if dbGroupName == '':
            return(datasetlist)
        # get the entry of the group:
        datasets = list(filter(lambda x: x['group'] == dbGroupName, datasetlist))[0]['datasets']
        # api_key and file_type come from the base request, so drop them:
        removeCases = lambda array: list(filter(lambda x: x not in ['api_key', 'file_type'], array))
        dbParams = {entry['short name']: {'urlSuffix': entry['database'],
                                          'json key': entry['json key'],
                                          'params': removeCases(entry['parameters'])}
                    for entry in datasets}
        return(dbParams)

    def _warnings(self, warningName, inputs, warningsOn=True):
        if not warningsOn:
            return
        if warningName == 'countPassLimit':
            # warn if the number of rows in the database exceeds the number that can
            # be downloaded; inputs = a request result of a FED API
            _count = inputs.json().get('count', 1)
            _limit = inputs.json().get('limit', 1000)
            if _count > _limit:
                warningText = 'NOTICE: dataset exceeds the download limit! Check count ({}) and limit ({})'.format(_count, _limit)
                warnings.warn(warningText)

    def _getBaseCode(self, codeEntries):
        '''
        The base format of a code snippet that can replicate a driver using
        requests directly.
        '''
        userSettings = utils.getUserSettings()
        pkgConfig = utils.getPkgConfig()
        storagePref = userSettings['ApiKeysPath'].split('.')[-1]
        passToCode = {'ApiKeyLabel': userSettings["ApiKeyLabel"], "url": pkgConfig['url'],
                      'ApiKeysPath': userSettings['ApiKeysPath']}
        code = ''
        if storagePref == 'json':
            # fix: literal braces in the template are doubled so .format does not choke
            code = '''
import requests
import json
import pandas as pd

# the json file should contain: {{"{ApiKeyLabel}": {{"key": "YOUR KEY", "url": "{url}"}}}}
apiKeysFile = '{ApiKeysPath}'
with open(apiKeysFile) as jsonFile:
    apiInfo = json.load(jsonFile)
    url, key = apiInfo['{ApiKeyLabel}']['url'], apiInfo['{ApiKeyLabel}']['key']
'''.format(**passToCode)
        if storagePref == 'env':
            code = '''
import requests
import os
import pandas as pd

url = "{url}"
key = os.getenv("{ApiKeyLabel}")
'''.format(**passToCode)
        if storagePref == 'yaml':
            code = '''
import requests
import yaml
import pandas as pd

apiKeysFile = '{ApiKeysPath}'
with open(apiKeysFile, 'r') as stream:
    apiInfo = yaml.safe_load(stream)
    url, key = apiInfo['{ApiKeyLabel}']['url'], apiInfo['{ApiKeyLabel}']['key']
'''.format(**passToCode)  # fix: the yaml branch was missing .format
        return(code)

    def _getCode(self, query, userSettings={}, pandasCode=""):
        # code common to all drivers:
        try:
            url = query['url']
            if not userSettings:  # if userSettings is an empty dict
                apiKeyPath = generalSettings.getGeneralSettings().userSettings['ApiKeysPath']
            else:
                apiKeyPath = userSettings['ApiKeysPath']
        except:
            url = " incomplete connection information "
            apiKeyPath = " incomplete connection information "

        baseCode = self._getBaseCode([url, apiKeyPath])  # fix: was a bare _getBaseCode call

        # code specific to this driver; mask the url and the user's key.
        # query['params'] was flattened to a string by _getBaseQuery, and FED
        # queries carry the key as api_key (not the BEA-style UserID), so mask
        # it in the string:
        queryClean = deepcopy(query)
        queryClean['url'] = 'url'
        queryClean['params'] = re.sub(r'api_key=[^&]*', 'api_key=APIKEY', queryClean['params'])
        queryCode = '''
query = {}
query['params'] = query['params'].replace('APIKEY', key)
retrivedData = requests.get(**query)

{} #replace json by xml if this is the request format
'''.format(json.dumps(queryClean), pandasCode)
        queryCode = queryCode.replace('"url": "url"', '"url": url')
        return(baseCode + queryCode)

    def _clipcode(self):
        '''
        Copy the last verbose code string to the user's clipboard (windows only).
        '''
        try:
            pyperclip.copy(self._lastLoad['code'])
        except:
            print("The loaded session does not have a code entry. Re-run with verbose=True, eg: v.drivername(..., verbose=True)")
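
The queryFactory pattern above can be exercised in isolation. A self-contained sketch (hypothetical one-entry registry, no network access) of how _selectDBQuery and _getQueryArgs cooperate:

    import functools

    dbParams = {'observations': {'params': ['series_id', 'limit']}}   # hypothetical registry slice

    def query(dbName, params={}, verbose=False):
        # stand-in for driverCore._query: just echo what would be requested
        return {'db': dbName, 'params': params, 'verbose': verbose}

    def selectDBQuery(dbName):
        fun = functools.partial(query, dbName)
        def lfun(*args, **kwargs):
            paramArray = dbParams[dbName]['params']
            params = dict(zip(paramArray, args))                                  # positional -> named
            params.update({k: v for k, v in kwargs.items() if k in paramArray})   # named query params
            other = {k: v for k, v in kwargs.items() if k not in paramArray}      # eg, verbose
            return fun(params=params, **other)
        lfun.options = dbParams[dbName]['params']  # quick user tip, as in _selectDBQuery
        return lfun

    obs = selectDBQuery('observations')
    print(obs.options)                          # ['series_id', 'limit']
    print(obs('GDP', limit='10', verbose=True))
    # {'db': 'observations', 'params': {'series_id': 'GDP', 'limit': '10'}, 'verbose': True}
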
===== datapungi_fed/drivers.py =====

'''
Construct drivers connecting to databases.
'''
import pandas as pd
import requests
import json
from copy import deepcopy
import pyperclip
import math
import re
import inspect
import yaml
import itertools
import warnings
from datetime import datetime

from datapungi_fed import generalSettings  # NOTE: projectName
from datapungi_fed.driverCore import driverCore

# TODO: given a series query, calc if within the data limit; if not, break the query into pieces.
# TODO: decorate _query of series to handle arrays/tuples of symbols.
# TODO: decorate _query of series to accept "start"/"end" mapped to "observation_start..."


class datasetlist(driverCore):
    def _query(self):
        '''
        Returns the names of the available datasets, a short description, and
        their query parameters.

        Args:
            none
        Output:
            pandas table with query function name, database name, short
            description and query parameters.
        '''
        # get the dictionary of all drivers (in config/datasetlist.yaml)
        datasetlist = self._dbParameters()
        datasetlistExp = [[{**entry, **dataset} for dataset in entry.pop('datasets')]
                          for entry in datasetlist]
        datasetlistFlat = list(itertools.chain.from_iterable(datasetlistExp))  # flatten the array of arrays
        df_output = pd.DataFrame(datasetlistFlat)
        return(df_output)

    def __call__(self):
        return(self._query())
class categories(driverCore):
    def __init__(self, dbGroupName='Categories', defaultQueryFactoryEntry='category', **kwargs):
        '''
        Initializes a dictionary of db queries.
        '''
        super(categories, self).__init__(dbGroupName, defaultQueryFactoryEntry, **kwargs)

    def _cleanOutput(self, dbName, query, retrivedData):
        dataKey = self.dbParams[dbName]['json key']
        self._cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
        df_output = pd.DataFrame(retrivedData.json()[dataKey])  # TODO: deal with xml
        warnings.filterwarnings("ignore", category=FutureWarning)  # TODO: silence the _meta warning
        setattr(df_output, '_meta',
                dict(filter(lambda entry: entry[0] != dataKey, retrivedData.json().items())))
        return(df_output)

    def _driverMetadata(self):
        # NOTE: params copied from the tags driver; TODO: tailor per endpoint
        self.metadata = [{
            "displayName": "categories",
            "method": "categories",  # run with getattr(data,'categories')()
            "params": {'file_type': 'json', 'realtime_start': '', 'realtime_end': '',
                       'tag_names': '', 'exclude_tag_names': '', 'tag_group_id': '',
                       'search_text': '', 'limit': '', 'offset': '', 'order_by': '', 'sort_order': ''},
        }]


class releases(driverCore):
    def __init__(self, dbGroupName='Releases', defaultQueryFactoryEntry='releases', **kwargs):
        '''
        Initializes a dictionary of db queries.
        '''
        super(releases, self).__init__(dbGroupName, defaultQueryFactoryEntry, **kwargs)

    def _cleanOutput(self, dbName, query, retrivedData):
        dataKey = self.dbParams[dbName]['json key']
        self._cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
        df_output = pd.DataFrame(retrivedData.json()[dataKey])  # TODO: deal with xml
        warnings.filterwarnings("ignore", category=FutureWarning)  # TODO: silence the _meta warning
        setattr(df_output, '_meta',
                dict(filter(lambda entry: entry[0] != dataKey, retrivedData.json().items())))
        return(df_output)

    def _driverMetadata(self):
        # NOTE: params copied from the tags driver; TODO: tailor per endpoint
        self.metadata = [{
            "displayName": "releases",
            "method": "releases",  # run with getattr(data,'releases')()
            "params": {'file_type': 'json', 'realtime_start': '', 'realtime_end': '',
                       'tag_names': '', 'exclude_tag_names': '', 'tag_group_id': '',
                       'search_text': '', 'limit': '', 'offset': '', 'order_by': '', 'sort_order': ''},
        }]


class series(driverCore):
    def __init__(self, dbGroupName='Series', defaultQueryFactoryEntry='observations', **kwargs):
        '''
        Initializes a dictionary of db queries.
        '''
        super(series, self).__init__(dbGroupName, defaultQueryFactoryEntry, **kwargs)

    def _cleanOutput(self, dbName, query, retrivedData):
        dataKey = self.dbParams[dbName]['json key']
        self._cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
        df_output = pd.DataFrame(retrivedData.json()[dataKey])  # TODO: deal with xml
        df_output = df_output.drop(['realtime_end', 'realtime_start'], axis=1)
        df_output['date'] = pd.to_datetime(df_output['date'])
        df_output.set_index('date', inplace=True)
        df_output.value = pd.to_numeric(df_output.value, errors='coerce')  # fix: was 'coerse'
        warnings.filterwarnings("ignore", category=FutureWarning)  # TODO: silence the _meta warning
        setattr(df_output, '_meta',
                dict(filter(lambda entry: entry[0] != dataKey, retrivedData.json().items())))
        return(df_output)

    def _driverMetadata(self):
        # NOTE: params copied from the tags driver; TODO: tailor per endpoint
        self.metadata = [{
            "displayName": "series",
            "method": "series",  # run with getattr(data,'series')()
            "params": {'file_type': 'json', 'realtime_start': '', 'realtime_end': '',
                       'tag_names': '', 'exclude_tag_names': '', 'tag_group_id': '',
                       'search_text': '', 'limit': '', 'offset': '', 'order_by': '', 'sort_order': ''},
        }]


class sources(driverCore):
    def __init__(self, dbGroupName='Sources', defaultQueryFactoryEntry='source', **kwargs):
        '''
        Initializes a dictionary of db queries.
        '''
        super(sources, self).__init__(dbGroupName, defaultQueryFactoryEntry, **kwargs)

    def _cleanOutput(self, dbName, query, retrivedData):
        dataKey = self.dbParams[dbName]['json key']
        self._cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
        df_output = pd.DataFrame(retrivedData.json()[dataKey])  # TODO: deal with xml
        warnings.filterwarnings("ignore", category=FutureWarning)  # TODO: silence the _meta warning
        setattr(df_output, '_meta',
                dict(filter(lambda entry: entry[0] != dataKey, retrivedData.json().items())))
        return(df_output)

    def _driverMetadata(self):
        # NOTE: params copied from the tags driver; TODO: tailor per endpoint
        self.metadata = [{
            "displayName": "sources",
            "method": "sources",  # run with getattr(data,'sources')()
            "params": {'file_type': 'json', 'realtime_start': '', 'realtime_end': '',
                       'tag_names': '', 'exclude_tag_names': '', 'tag_group_id': '',
                       'search_text': '', 'limit': '', 'offset': '', 'order_by': '', 'sort_order': ''},
        }]


class tags(driverCore):
    def __init__(self, dbGroupName='Tags', defaultQueryFactoryEntry='related_tags', **kwargs):
        '''
        Initializes a dictionary of db queries.
        '''
        super(tags, self).__init__(dbGroupName, defaultQueryFactoryEntry, **kwargs)

    def _cleanOutput(self, dbName, query, retrivedData):
        dataKey = self.dbParams[dbName]['json key']
        self._cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
        df_output = pd.DataFrame(retrivedData.json()[dataKey])  # TODO: deal with xml
        warnings.filterwarnings("ignore", category=FutureWarning)  # TODO: silence the _meta warning
        setattr(df_output, '_meta',
                dict(filter(lambda entry: entry[0] != dataKey, retrivedData.json().items())))
        return(df_output)

    def _driverMetadata(self):
        self.metadata = [{
            "displayName": "tags",
            "method": "tags",  # run with getattr(data,'tags')()
            "params": {'file_type': 'json', 'realtime_start': '', 'realtime_end': '',
                       'tag_names': '', 'exclude_tag_names': '', 'tag_group_id': '',
                       'search_text': '', 'limit': '', 'offset': '', 'order_by': '', 'sort_order': ''},
        }]
if __name__ == '__main__':
    # import datapungi_fed as dpf

    # categories:
    # d = categories()
    # v = d(125); print(v)
    # v = d['category'](125); print(v)
    # v = d['children'](13); print(v)
    # v = d['related'](32073); print(v)
    # v = d['series'](125); print(v)
    # v = d['tags'](125); print(v)
    # v = d['related_tags'](125, tag_names='services;quarterly'); print(v)
    # print(d['category'].options)

    # releases:
    # d = releases()
    # v = d(); print(1, v)
    # v = d['release'](release_id=53); print(2, v)
    # v = d['release/dates'](release_id=53); print(3, v)
    # v = d['release/series'](release_id=53); print(4, v)
    # v = d['release/sources'](release_id=53); print(5, v)
    # v = d['release/tags'](release_id=53); print(6, v)
    # v = d['release/related_tags'](release_id='86', tag_names='sa;foreign'); print(7, v)
    # v = d['release/tables'](release_id=53); print(8, v)

    # series:
    d = series()
    v = d('GDP')
    # v = d['series']('GDP'); print(1, v)
    # v = d['categories']('EXJPUS'); print(2, v)
    # v = d['observations']('GNP'); print(3, v)
    # v = d['release']('IRA'); print(4, v)
    # v = d['search'](search_text='monetary+service+index'); print(5, v)
    # v = d['search/tags'](series_search_text='monetary+service+index'); print(6, v)
    # v = d['search/related_tags'](series_search_text='mortgage+rate', tag_names='30-year;frb'); print(7, v)
    # v = d['tags'](series_id='STLFSI'); print(8, v)
    # v = d['updates'](); print(9, v)
    # v = d['vintagedates']('GNPCA'); print(10, v)

    # tags:
    # d = tags()
    # v = d('monetary+aggregates;weekly'); print(1, v)
    # v = d['tags'](); print(2, v)
    # v = d['related_tags'](tag_names='monetary+aggregates;weekly'); print(3, v)
    # v = d['tags/series'](tag_names='slovenia;food;oecd'); print(4, v)

    # datasetlist:
    # d = datasetlist()
    # v = d(); print(v)

    # sources:
    # d = sources()
    # v = d('1'); print(v)
    # v = d['source/releases']('1'); print(v)
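
The series._cleanOutput steps above (drop the realtime columns, parse dates, index by date, coerce values to numeric) can be replayed on a toy FRED-style observations payload. A self-contained sketch:

    import pandas as pd

    # toy payload; '.' marks a missing value in FRED observations
    payload = {'observations': [
        {'realtime_start': '2019-10-04', 'realtime_end': '2019-10-04', 'date': '2019-01-01', 'value': '21098.827'},
        {'realtime_start': '2019-10-04', 'realtime_end': '2019-10-04', 'date': '2019-04-01', 'value': '.'},
    ]}

    df = pd.DataFrame(payload['observations'])
    df = df.drop(['realtime_end', 'realtime_start'], axis=1)
    df['date'] = pd.to_datetime(df['date'])
    df.set_index('date', inplace=True)
    df.value = pd.to_numeric(df.value, errors='coerce')   # '.' -> NaN
    print(df)
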
===== datapungi_fed/generalSettings.py =====

'''
.generalSettings
~~~~~~~~~~~~~~~~
Loads general information: metadata of the datasource, metadata of the package's
database drivers (methods connecting to the databases of the datasource), and
the datasource url and user api key.
'''

from datapungi_fed import utils


class getGeneralSettings():  # NOTE: write as a mixin?
    def __init__(self, connectionParameters={}, userSettings={}):
        '''
        connectionParameters - API key and the (most used) url of the datasource;
          an entry should look like:
          {'key': 'your key', 'description': 'FED data', 'url': 'https://api.stlouisfed.org/fred/'}
        userSettings - contains things like the path to api keys and the preferred
          output format (json vs xml)
        datasourceOverview - a quick description of the datasource and its license
        packageMetadata - basic info on the package, to be used in a GUI or a
          catalog of methods that read data.  "databases" gets automatically
          updated with info on the methods that fetch specific datasets from the
          datasource; a typical entry looks like:
            {
              "displayName": "List of Datasets",
              "method": "datasetlist",  # NOTE: run with getattr(data,'datasetlist')()
              "params": {},             # no parameters in this case.
            }
        '''
        # load, for example, the API key and the (most used) path to the datasource
        self.userSettings = utils.getUserSettings(userSettings=userSettings)
        self.connectionParameters = utils.getConnectionParameters(connectionParameters, userSettings)
        self.baseRequest = getBaseRequest(self.connectionParameters, self.userSettings)
        self.datasourceOverview = getDatasourceOverview()
        self.packageMetadata = getPackageMetadata()


def getBaseRequest(connectionParameters={}, userSettings={}):
    '''
    Translate the connection parameters (a flat dictionary) to the format used by
    requests (or another connector), translating names to the ones used by the
    datasource.
    '''
    if userSettings == {}:
        userSettings = dict(ResultFormat='json')  # fix: was file_type, which broke the lookup below
        print("result format was set to json since no 'ResultFormat' was found or passed in userSettings")
    output = {  # the base of a requests' request - the drivers add to this.
        'url': connectionParameters['url'],
        'params': {
            'api_key': connectionParameters['key'],
            'file_type': userSettings["ResultFormat"]
        }
    }
    return(output)


def getDatasourceOverview():
    output = '''
        Userguides:

        Licenses (always check with the data provider):
          Data used is sourced from the Federal Reserve (FED).
          As stated on its website:
          -
          For more information, see:
    '''
    return(output)


def getPackageMetadata():
    output = {
        "name": "datapungi_fed",
        "loadPackageAs": "dpf",
        "apiClass": "data",
        "displayName": "FED",
        "description": "Access data from the Federal Reserve (FED)",
        "databases": [  # TODO: pass this to the drivers; load the individual drivers' metadata in the api.
            {
                "displayName": "categories",
                "method": "categories",
                "params": {},  # parameters and default options.
            },
            {
                "displayName": "tags",
                "method": "tags",
                "params": {'category_id': '125', 'file_type': 'json', 'realtime_start': '',
                           'realtime_end': '', 'tag_names': '', 'exclude_tag_names': '',
                           'tag_group_id': '', 'search_text': '', 'limit': '', 'offset': '',
                           'order_by': '', 'sort_order': ''},  # parameters and default options.
            },
        ],
    }
    return(output)
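
For reference, the shape getBaseRequest produces - the skeleton every driver extends - looks like this sketch (placeholder values):

    connectionParameters = {'key': 'YOURKEY', 'url': 'https://api.stlouisfed.org/fred/'}
    userSettings = {'ResultFormat': 'json'}

    baseRequest = {
        'url': connectionParameters['url'],
        'params': {'api_key': connectionParameters['key'], 'file_type': userSettings['ResultFormat']},
    }
    # driverCore then appends, eg, 'series/observations' to 'url' and flattens
    # 'params' into a query string (see driverCore._getBaseQuery).
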
===== datapungi_fed/temp.txt =====

api = 'series',
series_id = '', realtime_start = '', realtime_end = '', file_type = 'json',
limit = '', offset = '', sort_order = '',
observation_start = '', observation_end = '', units = '', frequency = '',
aggregation_method = '', output_type = '', vintage_dates = '',
search_text = '', search_type = '', order_by = '',
filter_variable = '', filter_value = '', tag_names = '', exclude_tag_names = '',
series_search_text = '',  #(fred/series/search/tags or related_tags args)
tag_group_id = '',        #(fred/series/search/tags or related_tags args)
tag_search_text = '',     #(fred/series/search/tags or related_tags args)

===== datapungi_fed/test.py =====

# scratch file used during development
import os
import yaml


class F(object):
    def __init__(self, a, b):
        self.a = 1
        self.b = 2
        self.ff = {}
        self.ff['aa'] = self.f1
        self.ff['bb'] = self.f2

    def f1(self, x):
        ''' test '''
        return(x)

    def f2(self, x, y):
        return(y + x + self.a)

    def __getitem__(self, key):
        return(self.ff[key])

    def __call__(self, *args, **kwargs):
        return()


with open(os.getcwd() + '/datapungi_fed/config/datasetlist.yaml') as yf:
    d = yaml.safe_load(yf)

if __name__ == '__main__':
    f = F(1, 2)
    print(f['bb'](1))
    print(f(1, 2))

# scratch: [api_key, file_type, search_text, search_type, realtime_start, realtime_end,
#           limit, offset, order_by, sort_order, filter_variable, filter_value,
#           tag_names, exclude_tag_names]

===== datapungi_fed/utils.py =====

'''
datapungi_fed.utils
~~~~~~~~~~~~~~~~~~~

Utility functions used within datapungi_fed and by users who want to update
internal configs.
'''

import json
import pkg_resources
import yaml
import os


def getConnectionParameters(connectionParameters={}, userSettings={}):
    '''
    :param userSettings: (optional) dictionary of ``'ApiKeysPath': a path to a json
        with API keys`` and ``'ApiKeyLabel': label (key) of the JSON entry containing
        the key``.  If userSettings is an empty dictionary (the default), the method
        will try to load it from the saved userSettings.

    output: a dictionary with the user key and the datasource url
    '''
    if not connectionParameters == {}:
        if isinstance(connectionParameters, str):  # user only passed a key, no url
            pkgcfgPath = getResourcePath("/config/pkgConfig.yaml")
            with open(pkgcfgPath, 'r') as stream:
                pkgCfg = yaml.safe_load(stream)
            connectionParameters = {'url': pkgCfg['url'], 'key': connectionParameters}
        return(connectionParameters)

    if userSettings == {}:
        userSettings = getUserSettings()

    try:
        storingMethod = userSettings['ApiKeysPath'].split('.')[-1]
        labelName = userSettings['ApiKeyLabel']
        if storingMethod == 'json':
            with open(userSettings['ApiKeysPath']) as jsonFile:
                connectionParameters = (json.load(jsonFile))[labelName]
        elif storingMethod == 'yaml':
            with open(userSettings['ApiKeysPath'], 'r') as stream:
                pkgCfg = yaml.safe_load(stream)
            connectionParameters = pkgCfg[labelName]
        elif storingMethod == 'env':
            # look for an environment variable called something like FED_url
            url = os.getenv(labelName + '_url')
            if url is None:  # if it can't be found, load from the package config
                pkgcfgPath = getResourcePath("/config/pkgConfig.yaml")
                with open(pkgcfgPath, 'r') as stream:
                    pkgCfg = yaml.safe_load(stream)
                url = pkgCfg['url']
            connectionParameters = {'key': os.getenv(labelName), 'url': url}
        return(connectionParameters)
    except:
        print('Could not find the connection parameters in \n ' + str(userSettings.get('ApiKeysPath')))
        return


def getResourcePath(relativePath, resource_package=__name__):
    '''
    Given a relative path, get its full path; eg the relative path
    /config/userSettings.json returns the datapungi_fed path + relative path.
    Note: resource_package can be replaced with a package name, eg 'datapungi_fed'.
    '''
    fullPath = pkg_resources.resource_filename(resource_package, relativePath)
    return(fullPath)


def getUserSettings(userSettings={}):
    '''
    Loads the userSettings file.
    '''
    if not userSettings == {}:
        return(userSettings)
    userSettingsPath = getResourcePath('/config/userSettings.json')
    try:
        with open(userSettingsPath) as jsonFile:
            userSettings = json.load(jsonFile)
        return(userSettings)
    except:
        print('.utils.py: could not open the userSettings: \n ./config/userSettings.json \n returning an empty dictionary')
        return({})


def getPkgConfig():
    '''
    Reads the pkgConfig - eg, the default url.
    '''
    pkgcfgPath = getResourcePath("/config/pkgConfig.yaml")
    with open(pkgcfgPath, 'r') as stream:
        pkgCfg = yaml.safe_load(stream)
    return(pkgCfg)


def setPkgConfig(newUrl):
    '''
    Saves the default url of the API.
    '''
    if not isinstance(newUrl, str):
        print('Provide a string of the API URL')
        return  # fix: was pass, which let execution continue
    pkgcfgPath = getResourcePath("/config/pkgConfig.yaml")
    with open(pkgcfgPath, 'r') as stream:
        pkgCfg = yaml.safe_load(stream)
    pkgCfg['url'] = newUrl
    with open(pkgcfgPath, 'w') as outfile:
        yaml.dump(pkgCfg, outfile, default_flow_style=False)
def setUserSettings(newPath):  # TODO: check if still valid
    '''
    Sets the api key path in the package config file, eg:

      import datapungi_fed as dpf
      dpf.utils.setUserSettings('myPath')
    '''
    userSettingsPath = getResourcePath('/config/userSettings.json')
    try:
        with open(userSettingsPath) as jsonFile:
            config = json.load(jsonFile)
    except:
        print('Could not open the configuration file: \n datapungi_fed/config/userSettings.json')
        return  # fix: was pass, which let execution continue with config unbound
    config['ApiKeysPath'] = newPath
    try:
        with open(userSettingsPath, 'w') as jsonFile:
            json.dump(config, jsonFile)
        print('Path to the API keys updated! New path: \n' + config['ApiKeysPath'])
    except:
        print('Could not save the configuration to file: \n datapungi_fed/config/userSettings.json \n API key path not updated')


def setKeyName(newName):  # TODO: check if still valid
    '''
    Sets the api key name in the package config file, eg:

      import datapungi_fed as dpf
      dpf.utils.setKeyName('FED')
    '''
    userSettingsPath = getResourcePath('/config/userSettings.json')
    try:
        with open(userSettingsPath) as jsonFile:
            config = json.load(jsonFile)
    except:
        print('Could not open the configuration file: \n datapungi_fed/config/userSettings.json')
        return
    config["ApiKeyLabel"] = newName
    try:
        with open(userSettingsPath, 'w') as jsonFile:
            json.dump(config, jsonFile)
        print('Name of the API key updated! New name: \n' + config["ApiKeyLabel"])
    except:
        print('Could not save the configuration to file: \n datapungi_fed/config/userSettings.json \n API key name not updated')


def setTestFolder(newTestsPath):
    userSettingsPath = getResourcePath('/config/userSettings.json')
    try:
        with open(userSettingsPath) as jsonFile:
            config = json.load(jsonFile)
    except:
        print('Could not open the configuration file: \n datapungi_fed/config/userSettings.json')
        return
    config['TestsOutputPath'] = newTestsPath
    try:
        with open(userSettingsPath, 'w') as jsonFile:
            json.dump(config, jsonFile)
        print('Path to the tests output folder updated! New path: \n' + config['TestsOutputPath'])
    except:
        print('Could not save the configuration to file: \n datapungi_fed/config/userSettings.json \n tests output path not updated')


if __name__ == '__main__':
    setTestFolder('U:/Tests')


===== datapungi_fed/.vscode/settings.json =====

{
    "python.pythonPath": "C:\\Users\\jjott\\AppData\\Local\\Programs\\Python\\Python37\\python.exe"
}

===== datapungi_fed/config/__init__.py =====

# (empty file)
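
The registry below (config/datasetlist.yaml) is what driverCore._dbParameters parses into per-endpoint lookups. A self-contained sketch of that transformation on a one-entry slice (inline YAML, no file access):

    import yaml

    raw = '''
    - api: FRED
      group: Tags
      datasets:
        - {short name: tags, database: tags, json key: tags, description: Get all tags., parameters: [api_key, file_type, tag_names, limit]}
    '''
    datasetlist = yaml.safe_load(raw)
    datasets = next(g for g in datasetlist if g['group'] == 'Tags')['datasets']
    drop = lambda arr: [p for p in arr if p not in ('api_key', 'file_type')]  # supplied by the base request
    dbParams = {d['short name']: {'urlSuffix': d['database'], 'json key': d['json key'],
                                  'params': drop(d['parameters'])} for d in datasets}
    print(dbParams)
    # {'tags': {'urlSuffix': 'tags', 'json key': 'tags', 'params': ['tag_names', 'limit']}}
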
===== datapungi_fed/config/datasetlist.yaml =====

- api: FRED
  group: Categories
  datasets:
    - { short name: category, database: category, json key: categories, description: Get a category., parameters: [api_key,file_type,category_id] }
    - { short name: children, database: category/children, json key: categories, description: Get the child categories for a specified parent category., parameters: [api_key,file_type,category_id,realtime_start,realtime_end] }
    - { short name: related, database: category/related, json key: categories, description: Get the related categories for a category., parameters: [api_key,file_type,category_id,realtime_start,realtime_end] }
    - { short name: series, database: category/series, json key: seriess, description: Get the series in a category., parameters: [api_key,file_type,category_id,realtime_start,realtime_end,limit,offset,order_by,sort_order,filter_variable,filter_value,tag_names,exclude_tag_names] }
    - { short name: tags, database: category/tags, json key: tags, description: Get the tags for a category., parameters: [api_key,file_type,category_id,realtime_start,realtime_end,tag_names,tag_group_id,search_text,limit,offset,order_by,sort_order] }
    - { short name: related_tags, database: category/related_tags, json key: tags, description: Get the related tags for a category., parameters: [api_key,file_type,category_id,realtime_start,realtime_end,tag_names,exclude_tag_names,tag_group_id,search_text,limit,offset,order_by] }
- api: FRED
  group: Releases
  datasets:
    - { short name: releases, database: releases, json key: releases, description: Get all releases of economic data., parameters: [api_key,file_type,realtime_start,realtime_end,limit,offset,order_by,sort_order] }
    - { short name: releases/dates, database: releases/dates, json key: release_dates, description: Get release dates for all releases of economic data., parameters: [api_key,file_type,realtime_start,realtime_end,limit,offset,order_by,sort_order,include_release_dates_with_no_data] }
    - { short name: release, database: release, json key: releases, description: Get a release of economic data., parameters: [api_key,file_type,release_id,realtime_start,realtime_end] }
    - { short name: release/dates, database: release/dates, json key: release_dates, description: Get release dates for a release of economic data., parameters: [api_key,file_type,release_id,realtime_start,realtime_end,limit,offset,sort_order,include_release_dates_with_no_data] }
    - { short name: release/series, database: release/series, json key: seriess, description: Get the series on a release of economic data., parameters: [api_key,file_type,release_id,realtime_start,realtime_end,limit,offset,order_by,sort_order,filter_variable,filter_value,tag_names,exclude_tag_names] }
    - { short name: release/sources, database: release/sources, json key: sources, description: Get the sources for a release of economic data., parameters: [api_key,file_type,release_id,realtime_start,realtime_end] }
    - { short name: release/tags, database: release/tags, json key: tags, description: Get the tags for a release., parameters: [api_key,file_type,release_id,realtime_start,realtime_end,tag_names,tag_group_id,search_text,limit,offset,order_by,sort_order] }
    - { short name: release/related_tags, database: release/related_tags, json key: tags, description: Get the related tags for a release., parameters: [api_key,file_type,release_id,realtime_start,realtime_end,tag_names,exclude_tag_names,tag_group_id,search_text,limit,offset,order_by,sort_order] }
    - { short name: release/tables, database: release/tables, json key: elements, description: Get the release tables for a given release., parameters: [api_key,file_type,release_id,element_id,include_observation_values,observation_date] }
- api: FRED
  group: Series
  datasets:
    - { short name: series, database: series, json key: seriess, description: Get an economic data series., parameters: [api_key,file_type,series_id,realtime_start,realtime_end] }
    - { short name: categories, database: series/categories, json key: categories, description: Get the categories for an economic data series., parameters: [api_key,file_type,series_id,realtime_start,realtime_end] }
    - { short name: observations, database: series/observations, json key: observations, description: Get the observations or data values for an economic data series., parameters: [api_key,file_type,series_id,realtime_start,realtime_end,limit,offset,sort_order,observation_start,observation_end,units,frequency,aggregation_method,output_type,vintage_dates] }
    - { short name: release, database: series/release, json key: releases, description: Get the release for an economic data series., parameters: [api_key,file_type,series_id,realtime_start,realtime_end] }
    - { short name: search, database: series/search, json key: seriess, description: Get economic data series that match keywords., parameters: [api_key,file_type,search_text,search_type,realtime_start,realtime_end,limit,offset,order_by,sort_order,filter_variable,filter_value,tag_names,exclude_tag_names] }
    - { short name: search/tags, database: series/search/tags, json key: tags, description: Get the tags for a series search., parameters: [api_key,file_type,series_search_text,realtime_start,realtime_end,tag_names,tag_group_id,tag_search_text,limit,offset,order_by,sort_order] }
    - { short name: search/related_tags, database: series/search/related_tags, json key: tags, description: Get the related tags for a series search., parameters: [api_key,file_type,series_search_text,realtime_start,realtime_end,tag_names,exclude_tag_names,tag_group_id,tag_search_text,limit,offset,order_by,sort_order] }
    - { short name: tags, database: series/tags, json key: tags, description: Get the tags for an economic data series., parameters: [api_key,file_type,series_id,realtime_start,realtime_end,order_by,sort_order] }
    - { short name: updates, database: series/updates, json key: seriess, description: Get economic data series sorted by when observations were updated on the FRED® server., parameters: [api_key,file_type,realtime_start,realtime_end,limit,offset,filter_value,start_time,end_time] }
    - { short name: vintagedates, database: series/vintagedates, json key: vintage_dates, description: Get the dates in history when a series' data values were revised or new data values were released., parameters: [api_key,file_type,series_id,realtime_start,realtime_end,limit,offset,sort_order] }
- api: FRED
  group: Sources
  datasets:
    - { short name: sources, database: sources, json key: sources, description: Get all sources of economic data., parameters: [api_key,file_type,realtime_start,realtime_end,limit,offset,order_by,sort_order] }
    - { short name: source, database: source, json key: sources, description: Get a source of economic data., parameters: [api_key,file_type,source_id,realtime_start,realtime_end] }
    - { short name: source/releases, database: source/releases, json key: releases, description: Get the releases for a source., parameters: [api_key,file_type,source_id,realtime_start,realtime_end,limit,offset,order_by,sort_order] }
- api: FRED
  group: Tags
  datasets:
    - { short name: tags, database: tags, json key: tags, description: "Get all tags, search for tags, or get tags by name.", parameters: [api_key,file_type,realtime_start,realtime_end,tag_names,tag_group_id,search_text,limit,offset,order_by,sort_order] }
    - { short name: related_tags, database: related_tags, json key: tags, description: "Get the related tags for one or more tags.", parameters: [api_key,file_type,tag_names,exclude_tag_names,tag_group_id,search_text,realtime_start,realtime_end,limit,offset,order_by,sort_order] }
    - { short name: tags/series, database: tags/series, json key: seriess, description: Get the series matching tags., parameters: [api_key,file_type,tag_names,exclude_tag_names,realtime_start,realtime_end,limit,offset,order_by,sort_order] }

===== datapungi_fed/config/pkgConfig.yaml =====

url : https://api.stlouisfed.org/fred/

===== datapungi_fed/config/userSettings.json =====

{"ApiKeysPath": "env", "ApiKeyLabel": "FED", "ResultFormat": "json", "TestsOutputPath": "C:/Tests/"}
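
Each registry entry maps one-to-one onto a FRED endpoint, with the base url coming from pkgConfig.yaml. A sketch of a direct request replicating data.series('GDP') (placeholder key; not part of the package):

    import requests

    url = 'https://api.stlouisfed.org/fred/series/observations'   # pkgConfig url + the 'observations' database
    params = {'api_key': 'YOURKEY', 'file_type': 'json', 'series_id': 'GDP'}
    r = requests.get(url, params=params)
    print(r.json()['observations'][:2])
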
===== datapungi_fed/tests/__init__.py =====

from datapungi_fed.tests.main import runTests

===== datapungi_fed/tests/conftest.py =====

# content of conftest.py
import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--cmdopt", action="store", default="", help="enter API key"
    )


@pytest.fixture
def cmdopt(request):
    return request.config.getoption("--cmdopt")

===== datapungi_fed/tests/main.py =====

import subprocess
import os
from datapungi_fed.utils import getUserSettings


def runTests(outputPath='', testsPath='', verbose=True):
    if not testsPath:
        testsPath = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")
        print('**************************** \nWill run tests in: ' + testsPath)
    if not outputPath:
        outputPath = "U:/"
        try:
            settingsFile = getUserSettings()
            outputPath = settingsFile['TestsOutputPath']
        except:
            print("Could not load TestsOutputPath from user settings. Perhaps run utils.setTestFolder(filePath)")
    subprocess.Popen('pytest ' + testsPath + ' --html=' + outputPath + 'datapungi_fed_Tests.html --self-contained-html')
    if verbose:
        print('Tests will be saved in ' + outputPath + 'datapungi_fed_Tests.html \n****************************')


if __name__ == '__main__':
    runTests()

===== datapungi_fed/tests/test_drivers.py =====

import datapungi_fed as dp
import time
import pandas as pd
import os


def executeCode(stringIn):
    '''
    Auxiliary function for tests: take the requests code as a string and try to
    execute it.
    '''
    try:
        exec(stringIn + '\n')
        # try to output the dataframe called df_output
        return(dict(codeRun=True, codeOutput=locals()['df_output']))
    except:
        try:
            exec(stringIn)  # if there is no df_output, check at least that the code execs
            return(dict(codeRun=True, codeOutput=pd.DataFrame([])))
        except:
            return(dict(codeRun=False, codeOutput=pd.DataFrame([])))


def startDriver(cmdopt):
    # start the driver - used by all tests
    if not cmdopt == "":
        connectionParameters = {"key": cmdopt, "url": ""}
    else:
        connectionParameters = {}
    data = dp.data(connectionParameters)
    return(data)


def test_startDriver(cmdopt):
    data = startDriver(cmdopt)
    assert data


def test_categories(cmdopt):
    data = startDriver(cmdopt)
    driver = data.categories(**{}, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200               # a connection was established
    assert not driver['dataFrame'].empty                      # cleaned-up output is not empty
    assert execCode['codeRun']                                # try to execute the code.
    assert execCode['codeOutput'].equals(driver['dataFrame']) # code output equals driver output


def test_tags(cmdopt):
    data = startDriver(cmdopt)
    # note: related_tags (the default tags entry) requires tag_names; empty-string
    # params are sent verbatim and rejected by FRED, so the call is kept minimal.
    driver = data.tags(tag_names='monetary+aggregates;weekly', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])
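
The suite above can be launched from the package itself (a sketch; writes an html report to TestsOutputPath from userSettings.json), or directly via pytest, passing the API key through the --cmdopt option defined in conftest.py (pytest datapungi_fed/tests --cmdopt=YOURKEY):

    import datapungi_fed as dpf

    dpf.tests.runTests()   # runs pytest with --html output via tests/main.py
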
===== datapungi_fed/tests/test_driversCI.py =====

import datapungi_fed as dp
import time
import pandas as pd
import os


def executeCode(stringIn):
    '''
    Auxiliary function for tests: take the requests code as a string and try to
    execute it.
    '''
    try:
        exec(stringIn + '\n')
        # try to output the dataframe called df_output
        return(dict(codeRun=True, codeOutput=locals()['df_output']))
    except:
        try:
            exec(stringIn)  # if there is no df_output, check at least that the code execs
            return(dict(codeRun=True, codeOutput=pd.DataFrame([])))
        except:
            return(dict(codeRun=False, codeOutput=pd.DataFrame([])))


def test_startDriver(cmdopt):
    global data
    if not cmdopt == "":
        connectionParameters = {"key": cmdopt, "url": ""}
    else:
        connectionParameters = {}
    data = dp.data(connectionParameters)
    assert data


def test_categories():
    driver = data.categories(**{}, verbose=True)
    assert driver['request'].status_code == 200   # a connection was established
    assert not driver['dataFrame'].empty          # cleaned-up output is not empty


def test_tags():
    # note: related_tags (the default tags entry) requires tag_names; empty-string
    # params are sent verbatim and rejected by FRED, so the call is kept minimal.
    driver = data.tags(tag_names='monetary+aggregates;weekly', verbose=True)
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty

===== datapungi_fed-0.1.0.dist-info/LICENSE =====

MIT License

Copyright (c) 2019 James Otterson

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.