===== datapungi_fed/__init__.py =====

""" Gets data from the Federal Reserve (FED) by connecting to its API."""

import pandas
import requests
import sys

from datapungi_fed.api import *
import datapungi_fed.tests as tests

__version__ = '0.1.3'


class topCall(sys.modules[__name__].__class__):
    #makes the module itself callable: datapungi_fed(...) delegates to data()(...)
    def __call__(self, *args, **kwargs):
        coreClass = data()
        return(coreClass(*args, **kwargs))


sys.modules[__name__].__class__ = topCall

===== datapungi_fed/api.py =====

import pandas as pd
import requests
import sys

from datapungi_fed import generalSettings
from datapungi_fed import drivers
from datapungi_fed.driverCore import driverCore
#from driverCore import driverCore
#import drivers


class data():
    '''
    Provides an environment where the shared data needed to establish a connection is loaded,
    and serves as a one-stop shop listing all available drivers.

    :param connectionParameters: a dictionary with at least 'key' and 'url'
        {'key': 'your key', 'description': 'FED data', 'url': ''}
    :param userSettings: settings saved in the package pointing to a yaml/json or env
        containing the connection parameters
    '''
    def __init__(self, connectionParameters={}, userSettings={}):
        self.__connectInfo = generalSettings.getGeneralSettings(connectionParameters=connectionParameters, userSettings=userSettings)
        self._metadata = self.__connectInfo.packageMetadata
        self._help = self.__connectInfo.datasourceOverview
        #load drivers:
        loadInfo = {'baseRequest': self.__connectInfo.baseRequest, 'connectionParameters': self.__connectInfo.connectionParameters}
        #specific drivers
        self.datasetlist = drivers.datasetlist(**loadInfo)
        #core drivers
        coreDriversParams = driverCore()
        for dbGroupName in [x['group'] for x in coreDriversParams._dbParams]:
            setattr(self, dbGroupName.lower(), driverCore(dbGroupName, **loadInfo))

    def __call__(self, *args, **kwargs):
        return(self.series(*args, **kwargs))

    def __str__(self):
        return('\nList of drivers and their shortcuts')

    def _clipcode(self):
        try:
            self._lastCalledDriver.clipcode()
        except:
            print('Get data using a driver first, eg: data.series("GDP", verbose=True)')

    def _docDriver(self, driverName, printHelp=True):
        '''
        Given the delegated method name, get the __doc__ of its class;
        eg: returns the __doc__ of the main method inside the driver.
        '''
        #eg: _docDriver('NIPA')
        #parentName = list(self.DELEGATED_METHODS.keys())[list(self.DELEGATED_METHODS.values()).index([driverName])]
        #outhelp = getattr(getattr(self, parentName), driverName).__doc__
        #if printHelp:
        #    print(outhelp)
        #return(outhelp)
        return('')


if __name__ == '__main__':
    d = data()
    print(d)
    print(d.datasetlist())
    print(d.categories(125))
    print(d.releases())
    print(d.series('GDP'))
    print(d('GNP'))
    print(d.sources('1'))
    print(d.tags(tag_names='monetary+aggregates;weekly'))
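===== example: using the data class =====

# A minimal usage sketch (not a file in the package); assumes a FRED API key is configured,
# eg in the API_KEY_FED environment variable per config/userSettings.json.

import datapungi_fed as dpf

data = dpf.data()                                         #loads key/url from the saved settings
gdp = data.series('GDP')                                  #Series group; its default db is observations
tags = data.tags(tag_names='monetary+aggregates;weekly')  #Tags group
gnp = dpf('GNP')                                          #the module is callable: delegates to data().series(...)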
===== datapungi_fed/driverCore.py =====

'''
Base driver class
'''

import pandas as pd
import requests
import json
from copy import deepcopy
import pyperclip
import math
import re
import inspect
import yaml
import itertools
from datetime import datetime
import warnings
import functools
from textwrap import dedent

from datapungi_fed import generalSettings  #NOTE: projectName
#import generalSettings  #NOTE: projectName
from datapungi_fed import utils  #NOTE: projectName
#import utils  #NOTE: projectName


class driverCore():
    r'''
    Given a dbGroupName and its default db, starts a factory of query functions - ie, a function
    for each db in the group.  If dbGroupName is empty, returns the list of dbGroups, the dbs in
    each group, and their parameters.
    '''
    def __init__(self, dbGroupName='', baseRequest={}, connectionParameters={}, userSettings={}):
        #TODO: place defaultQueryFactoryEntry in the yaml
        self._dbParams, self.defaultQueryFactoryEntry = self._getDBParameters(dbGroupName)
        self._ETDB = extractTransformDB(baseRequest, connectionParameters, userSettings)  #a generic query is started
        self._ETFactory = extractTransformFactory(dbGroupName, self._ETDB, self._dbParams, self.defaultQueryFactoryEntry)
        self._driverMeta = driverMetadata()(dbGroupName)

    def __getitem__(self, dbName):
        return(self._ETFactory.extractTransformFactory[dbName])

    def __call__(self, *args, **kwargs):
        out = self._ETFactory.extractTransformFactory[self.defaultQueryFactoryEntry](*args, **kwargs)
        return(out)

    def _getDBParameters(self, dbGroupName=''):
        r'''
        The parameters of each database in the group (if empty, returns all groups x databases).
        '''
        dataPath = utils.getResourcePath('/config/datasetlist.yaml')
        with open(dataPath, 'r') as yf:
            datasetlist = yaml.safe_load(yf)
        if dbGroupName == '':
            defaultDB = {}
            return((datasetlist, defaultDB))
        #get the entry of the group:
        selected = list(filter(lambda x: x['group'] == dbGroupName, datasetlist))[0]
        defaultDB = selected.get('default query', '')
        datasets = selected.get('datasets', {})
        removeCases = lambda array: list(filter(lambda x: x not in ['api_key', 'file_type'], array))
        dbParams = {
            entry['short name']: {'urlSuffix': entry['database'], 'json key': entry['json key'], 'params': removeCases(entry['parameters'])}
            for entry in datasets
        }
        return((dbParams, defaultDB))


class extractTransformFactory():
    r'''
    Given a group name of databases, constructs a dictionary of functions querying each of its databases.
    '''
    def __init__(self, dbGroupName, ETDB, dbParams, defaultQueryFactoryEntry):
        if dbGroupName:
            self.dbGroupName = dbGroupName
            self.dbParams = dbParams
            self.ETDB = ETDB
            self.ETDB(self.dbGroupName, self.dbParams)  #update the connector with parameters specific to this collection of dbs.
            self.extractTransformFactory = {dbName: self.selectDBQuery(self.query, dbName) for dbName in self.dbParams.keys()}
            self.defaultQueryFactoryEntry = defaultQueryFactoryEntry  #the entry in the query factory that __call__ will use.
        else:
            self.extractTransformFactory = {}

    def query(self, *args, **kwargs):
        return(self.ETDB.query(*args, **kwargs))

    def selectDBQuery(self, queryFun, dbName):
        r'''
        Fixes a generic query to a query of dbName: creates a lambda that builds a query of dbName from args/kwargs.
        '''
        fun = functools.partial(queryFun, dbName)
        lfun = lambda *args, **kwargs: fun(**self.getQueryArgs(dbName, *args, **kwargs))
        #add quick user tips
        lfun.options = self.dbParams[dbName]['params']
        return(lfun)

    def getQueryArgs(self, dbName, *args, **kwargs):
        r'''
        Maps args and kwargs to driver args.
        '''
        #parameters to be passed to a requests query:
        paramArray = self.dbParams[dbName]['params']
        params = dict(zip(paramArray, args))
        paramsAdd = {key: val for key, val in kwargs.items() if key in paramArray}
        params.update(paramsAdd)
        #non-query options (eg, verbose)
        otherArgs = {key: val for key, val in kwargs.items() if key not in paramArray}
        return({**{'params': params}, **otherArgs})
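# For illustration (hypothetical call): in the 'Series' group the 'observations' db lists
# series_id as its first user parameter, so
#     series['observations']('GDP', observation_start='2000-01-01', verbose=True)
# is mapped by getQueryArgs to
#     {'params': {'series_id': 'GDP', 'observation_start': '2000-01-01'}, 'verbose': True}
# - positional args are zipped against the db's parameter list, kwargs in that list join
# 'params', and everything else (eg, verbose) passes through as a driver option.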
class extractTransformDB():
    r'''
    Functions to connect to and query a db given its dbName and dbParams (see the yaml in config for these).
    '''
    def __init__(self, baseRequest={}, connectionParameters={}, userSettings={}):
        '''
        Loads generic parameters (ie, api key, location of data).
        '''
        self._connectionInfo = generalSettings.getGeneralSettings(connectionParameters=connectionParameters, userSettings=userSettings)
        self._baseRequest = self.getBaseRequest(baseRequest, connectionParameters, userSettings)
        self._lastLoad = {}  #data is stored here to assist functions such as clipcode
        self._transformData = transformExtractedData()
        self._getCode = transformIncludeCodeSnippet()
        self._cleanCode = ""  #TODO: improvable - this is the code snippet producing a pandas df

    def __call__(self, dbGroup, dbParams):
        r'''
        A call to an instance of the class loads the specific parameters of the dbs of dbGroup.
        '''
        self.dbGroup = dbGroup
        self.dbParams = dbParams

    def query(self, dbName, params={}, file_type='json', verbose=False, warningsOn=True):
        r'''
        Args:
            params
            file_type
            verbose
            warningsOn
        '''
        #get requests' query inputs
        warningsList = ['countPassLimit']  #warn on these events.
        prefixUrl = self.dbParams[dbName]['urlSuffix']
        output = self.queryApiCleanOutput(prefixUrl, dbName, params, warningsList, warningsOn, verbose)
        return(output)

    def queryApiCleanOutput(self, urlPrefix, dbName, params, warningsList, warningsOn, verbose):
        r'''
        Core steps of querying and cleaning data.  Notice: specific data cleaning should be
        implemented in the specific driver classes.

        Args:
            self - should contain a base request (url)
            urlPrefix (str) - a string appended to the request url (eg, https://.../ -> https://.../urlPrefix?)
            params (dict) - usually empty; overrides any query params with the entries of this dictionary
            warningsList (list) - the list of events that can lead to warnings
            warningsOn (bool) - turn driver warnings on/off
            verbose (bool) - detailed or short output
        '''
        #get data
        query = self.getBaseQuery(urlPrefix, params)
        retrivedData = requests.get(**{key: entry for key, entry in query.items() if key in ['params', 'url']})
        #clean data
        df_output, self._cleanCode = self.cleanOutput(dbName, query, retrivedData)
        #print a warning if there is more data than the download limit
        for entry in warningsList:
            self._warnings(entry, retrivedData, warningsOn)
        #short or detailed output; update the _lastLoad attribute:
        output = self.formatOutputupdateLoadedAttrib(query, df_output, retrivedData, verbose)
        return(output)

    def getBaseQuery(self, urlPrefix, params):
        r'''
        Returns a dictionary of request arguments.

        Args:
            urlPrefix (str) - string appended to the end of the core url (eg, series -> http:...\series?)
            params (dict) - a dictionary with request parameters that override all other given parameters
        Returns:
            query (dict) - a dictionary with 'url' and 'params' (a string) to be passed to a request
        '''
        query = deepcopy(self._baseRequest)
        #update the query url
        query['url'] = query['url'] + urlPrefix
        query['params'].update(params)
        query['params_dict'] = query['params']
        query['params'] = '&'.join([str(entry[0]) + "=" + str(entry[1]) for entry in query['params'].items()])
        return(query)
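    # For illustration (placeholder key): getBaseQuery('series/observations', {'series_id': 'GDP'})
    # returns roughly
    #   {'url': 'https://api.stlouisfed.org/fred/series/observations',
    #    'params': 'api_key=YOUR_KEY&file_type=json&series_id=GDP',
    #    'params_dict': {'api_key': 'YOUR_KEY', 'file_type': 'json', 'series_id': 'GDP'}}
    # The '&'-joined string is what requests.get receives; params_dict is kept so later steps
    # (eg, relabeling the value column by series_id) can read individual entries.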
    def formatOutputupdateLoadedAttrib(self, query, df_output, retrivedData, verbose):
        if verbose == False:
            self._lastLoad = df_output
            return(df_output)
        else:
            code = self._getCode.transformIncludeCodeSnippet(query, self._baseRequest, self._connectionInfo.userSettings, self._cleanCode)
            output = dict(dataFrame=df_output, request=retrivedData, code=code)
            self._lastLoad = output
            return(output)

    def cleanOutput(self, dbName, query, retrivedData):
        r'''
        This is a placeholder - specific drivers should have their own cleaning method;
        this generates self._cleanCode.
        '''
        transformedOutput = self._transformData(self.dbGroup, dbName, self.dbParams, query, retrivedData)
        return(transformedOutput)

    def getBaseRequest(self, baseRequest={}, connectionParameters={}, userSettings={}):
        r'''
        Writes a base request.  This is the information used in most requests, such as the user key.
        '''
        if baseRequest == {}:
            connectInfo = generalSettings.getGeneralSettings(connectionParameters=connectionParameters, userSettings=userSettings)
            return(connectInfo.baseRequest)
        else:
            return(baseRequest)

    def _warnings(self, warningName, inputs, warningsOn=True):
        if not warningsOn:
            return
        if warningName == 'countPassLimit':
            #warn if the number of lines in the database exceeds the number that can be downloaded;
            #inputs = a request result of a FED API
            _count = inputs.json().get('count', 1)
            _limit = inputs.json().get('limit', 1000)
            if _count > _limit:
                warningText = 'NOTICE: dataset exceeds the download limit!  Check count ({}) and limit ({})'.format(_count, _limit)
                warnings.warn(warningText)


class transformExtractedData():
    def __call__(self, dbGroup, dbName, dbParams, query, retrivedData):
        if dbGroup == 'Series':
            return(self.cleanOutputSeries(dbName, dbParams, query, retrivedData))
        else:
            return(self.cleanOutput(dbName, dbParams, query, retrivedData))

    def cleanOutput(self, dbName, dbParams, query, retrivedData):
        #categories, releases, sources, tags
        dataKey = dbParams[dbName]['json key']
        cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
        df_output = pd.DataFrame(retrivedData.json()[dataKey])  #TODO: deal with xml
        warnings.filterwarnings("ignore", category=UserWarning)
        setattr(df_output, '_meta', dict(filter(lambda entry: entry[0] != dataKey, retrivedData.json().items())))
        warnings.filterwarnings("always", category=UserWarning)
        return((df_output, cleanCode))

    def cleanOutputSeries(self, dbName, dbParams, query, retrivedData):
        #series
        dataKey = dbParams[dbName]['json key']
        cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
        df_output = pd.DataFrame(retrivedData.json()[dataKey])  #TODO: deal with xml
        if dbName == 'observations':
            seriesID = query['params_dict']['series_id']
            df_output = df_output.drop(['realtime_end', 'realtime_start'], axis=1)
            df_output['date'] = pd.to_datetime(df_output['date'])
            df_output.set_index('date', inplace=True)
            df_output.value = pd.to_numeric(df_output.value, errors='coerce')
            df_output = df_output.rename({'value': seriesID}, axis='columns')
            cleanCode += "\ndf_output = df_output.drop(['realtime_end','realtime_start'],axis=1)"
            cleanCode += "\ndf_output['date'] = pd.to_datetime(df_output['date'])"
            cleanCode += "\ndf_output.set_index('date',inplace=True)"
            cleanCode += "\ndf_output.value = pd.to_numeric(df_output.value,errors='coerce')"
            cleanCode += "\ndf_output = df_output.rename({{ 'value' : '{seriesID}' }},axis='columns')".format(**{'seriesID': seriesID})
        warnings.filterwarnings("ignore", category=UserWarning)
        setattr(df_output, '_meta', dict(filter(lambda entry: entry[0] != dataKey, retrivedData.json().items())))
        warnings.filterwarnings("always", category=UserWarning)
        return((df_output, cleanCode))
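# For illustration: after series['observations']('GDP', verbose=True), the cleanCode string
# assembled above replays the pandas post-processing in the verbose output, roughly:
#   df_output = pd.DataFrame( retrivedData.json()['observations'] )
#   df_output = df_output.drop(['realtime_end','realtime_start'],axis=1)
#   df_output['date'] = pd.to_datetime(df_output['date'])
#   df_output.set_index('date',inplace=True)
#   df_output.value = pd.to_numeric(df_output.value,errors='coerce')
#   df_output = df_output.rename({ 'value' : 'GDP' },axis='columns')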
class transformIncludeCodeSnippet():
    def transformIncludeCodeSnippet(self, query, baseRequest, userSettings={}, pandasCode=""):
        #load the code header - get keys
        apiCode = self.getApiCode(query, userSettings)
        #load the request's code
        queryCode = self.getQueryCode(query, baseRequest, pandasCode)
        return(apiCode + queryCode)

    def getQueryCode(self, query, baseRequest, pandasCode=""):
        queryClean = {'url': query['url'], 'params': query['params']}  #pass only these two entries of query; params_dict is dropped.
        queryClean['url'] = 'url'
        queryClean['params'] = queryClean['params'].replace(baseRequest['params']['api_key'], '{}') + '.format(key)'  #replace the explicit api key by the var "key" pointing to it.
        queryCode = '''\
        query = {}
        retrivedData = requests.get(**query)

        {} #replace json by xml if this is the request format
        '''
        queryCode = dedent(queryCode).format(json.dumps(queryClean), pandasCode)
        queryCode = queryCode.replace('"url": "url"', '"url": url')
        queryCode = queryCode.replace('.format(key)"', '".format(key)')
        queryCode = queryCode.replace('"UserID": "key"', '"UserID": key')  #TODO: need to handle the generic case: UserID, api_key...
        return(queryCode)

    def getApiCode(self, query, userSettings):
        r'''
        The base format of a code snippet that can be used to replicate a driver using requests directly.
        '''
        try:
            url = query['url']
            if userSettings:
                apiKeyPath = userSettings['ApiKeysPath']
                apiKeyLabel = userSettings["ApiKeyLabel"]
            else:
                userSettings = generalSettings.getGeneralSettings().userSettings
                apiKeyPath = userSettings['ApiKeysPath']
                apiKeyLabel = userSettings["ApiKeyLabel"]
        except:
            url = " incomplete connection information "
            apiKeyPath = " incomplete connection information "
            apiKeyLabel = " incomplete connection information "
        #userSettings = utils.getUserSettings()
        #pkgConfig = utils.getPkgConfig()
        storagePref = apiKeyPath.split('.')[-1]
        passToCode = {'ApiKeyLabel': apiKeyLabel, "url": url, 'ApiKeysPath': apiKeyPath}
        code = self.apiCodeOptions(storagePref)
        code = code.format(**passToCode)
        return(code)

    def apiCodeOptions(self, storagePref):
        r'''
        storagePref: yaml, json, env
        '''
        if storagePref == 'yaml':
            code = '''\
            import requests
            import yaml
            import pandas as pd

            apiKeysFile = '{ApiKeysPath}'
            with open(apiKeysFile, 'r') as stream:
                apiInfo = yaml.safe_load(stream)
                url, key = apiInfo['{ApiKeyLabel}']['url'], apiInfo['{ApiKeyLabel}']['key']
            '''
        elif storagePref == 'json':
            code = '''\
            import requests
            import json
            import pandas as pd

            # the json file should contain: {{"{ApiKeyLabel}": {{"key": "YOUR KEY", "url": "{url}"}}}}
            apiKeysFile = '{ApiKeysPath}'
            with open(apiKeysFile) as jsonFile:
                apiInfo = json.load(jsonFile)
                url, key = apiInfo['{ApiKeyLabel}']['url'], apiInfo['{ApiKeyLabel}']['key']
            '''
        else:  #default to env
            code = '''\
            import requests
            import os
            import pandas as pd

            url = "{url}"
            key = os.getenv("{ApiKeyLabel}")
            '''
        return(dedent(code))

    def clipcode(self):
        r'''
        Copy the code string to the user's clipboard (windows only).
        '''
        try:
            pyperclip.copy(self._lastLoad['code'])
        except:
            print("Loaded session does not have a code entry.  Re-run with the verbose option set to True, eg: v.drivername(..., verbose=True)")
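# For illustration (placeholder values): with the shipped env storage ('ApiKeysPath': 'env',
# 'ApiKeyLabel': 'API_KEY_FED'), the header emitted by apiCodeOptions is
#   import requests
#   import os
#   import pandas as pd
#
#   url = "https://api.stlouisfed.org/fred/series/observations"
#   key = os.getenv("API_KEY_FED")
# followed by the query dict and the pandas clean-up code assembled in getQueryCode.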
class driverMetadata():
    def __call__(self, dbGroup):
        #NOTE: placeholder metadata - currently identical for the Categories, Releases, Series,
        #Sources and Tags groups, with a datasetlist default.
        if dbGroup in ('Categories', 'Releases', 'Series', 'Sources', 'Tags'):
            name = "tags"
        else:
            name = "datasetlist"
        self.metadata = [{
            "displayName": name,  #name of the driver's main function - run with getattr(data, name)()
            "method": name,
            "params": {'file_type': 'json', 'realtime_start': '', 'realtime_end': '', 'tag_names': '',
                       'exclude_tag_names': '', 'tag_group_id': '', 'search_text': '', 'limit': '',
                       'offset': '', 'order_by': '', 'sort_order': ''},
        }]
        return(self.metadata)


if __name__ == '__main__':
    case = driverCore(dbGroupName='Series')
    print(case('gdp', verbose=True))
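===== example: using driverCore directly =====

# A sketch (not a file in the package); assumes a FRED API key is configured, eg in the
# API_KEY_FED environment variable.

from datapungi_fed.driverCore import driverCore

series = driverCore(dbGroupName='Series')   #one query function per db in the Series group
print(series['observations'].options)       #the query parameters this db accepts
df = series('GDP')                          #__call__ uses the group's default db (observations)
out = series['search'](search_text='monetary+service+index', verbose=True)  #dict: dataFrame, request, code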
===== datapungi_fed/drivers.py =====

'''
Drivers that do not fit the driverCore format.
'''

import pandas as pd
import requests
import json
from copy import deepcopy
import pyperclip
import math
import re
import inspect
import yaml
import itertools
import warnings
from datetime import datetime

from datapungi_fed import generalSettings  #NOTE: projectName
#import generalSettings  #NOTE: projectName
from datapungi_fed import utils  #NOTE: projectName
#import utils  #NOTE: projectName
from datapungi_fed.driverCore import driverCore
#from driverCore import driverCore


class datasetlist(driverCore):
    def _query(self):
        '''
        Returns the names of the available datasets, a short description, and their query parameters.
        Args:
            none
        Output:
            - pandas table with query function name, database name, short description and query parameters.
        '''
        #get the dictionary of all drivers (in config/datasetlist.yaml)
        datasetlist = self._dbParams
        datasetlistExp = [[{**entry, **dataset} for dataset in entry.pop('datasets')] for entry in datasetlist]
        datasetlistFlat = list(itertools.chain.from_iterable(datasetlistExp))  #flatten the array of arrays
        df_output = pd.DataFrame(datasetlistFlat)
        return(df_output)

    def __call__(self):
        return(self._query())


if __name__ == '__main__':
    d = datasetlist()
    v = d()
    print(v)
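===== example: the flattening performed by datasetlist._query =====

# A toy sketch of the flatten step above, on made-up entries mimicking config/datasetlist.yaml:

import itertools
import pandas as pd

datasetlist = [
    {'group': 'Tags', 'datasets': [{'short name': 'tags'}, {'short name': 'related_tags'}]},
    {'group': 'Sources', 'datasets': [{'short name': 'sources'}]},
]
expanded = [[{**entry, **ds} for ds in entry.pop('datasets')] for entry in datasetlist]
flat = list(itertools.chain.from_iterable(expanded))
print(pd.DataFrame(flat))  #one row per (group, dataset) pair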
===== datapungi_fed/generalSettings.py =====

'''
.generalSettings
~~~~~~~~~~~~~~~~~

Loads general information: metadata of the datasource, metadata of the package's database
drivers (methods connecting to the databases of the datasource), and the datasource url and
user api key.
'''

from datapungi_fed import utils


class getGeneralSettings():  #NOTE: write as a mixin?
    def __init__(self, connectionParameters={}, userSettings={}):
        '''
        sessionParameters - API key and the url (most used) of the datasource; an entry should look like:
            {'key': 'your key', 'description': 'BEA data', 'address': 'https://apps.bea.gov/api/data/'}
        userSettings - contains things like the path to the api keys and the preferred output format (json vs xml)
        datasourceOverview - a quick description of the datasource and its license
        packageMetadata - basic info on the package, to be used in a GUI or a catalog of methods that
            read data.  Also, "databases" will get automatically updated with info on the methods that
            get specific datasets from the datasource.  A typical entry should look like:
            {
                "displayName": "List of Datasets",
                "method": "datasetlist",  #NOTE: run with getattr(data,'datasetlist')()
                "params": {},  #no parameters in this case.
            }
        '''
        #load, for example, the API key and the (most used) path to the datasource
        self.userSettings = utils.getUserSettings(userSettings=userSettings)
        self.connectionParameters = utils.getConnectionParameters(connectionParameters, userSettings)
        self.baseRequest = getBaseRequest(self.connectionParameters, self.userSettings)
        self.datasourceOverview = getDatasourceOverview()
        self.packageMetadata = getPackageMetadata()


def getBaseRequest(connectionParameters={}, userSettings={}):
    '''
    Translates the connection parameters, a flat dictionary, to the format used by requests
    (or another connector); also translates names to the ones used by the datasource.
    '''
    if userSettings == {}:
        userSettings = dict(ResultFormat='JSON')
        print("the result format was set to JSON since no 'ResultFormat' could be found or was passed in userSettings")
    output = {  #this is, for example, the base of a requests' request - the drivers add to this.
        'url': connectionParameters['url'],
        'params': {
            'api_key': connectionParameters['key'],
            'file_type': userSettings["ResultFormat"]
        }
    }
    return(output)


def getDatasourceOverview():
    output = '''
        Userguides:

        Licenses (always check with the data provider):
            Data used is sourced from the Federal Reserve (FED).
            As stated on its website:
            - For more information, see:
    '''
    return(output)


def getPackageMetadata():
    output = {
        "name": "datapungi_fed",
        "loadPackageAs": "dpf",
        "apiClass": "data",
        "displayName": "FED",
        "description": "Access data from the Federal Reserve (FED)",
        "databases": [  #TODO: pass this to the driver; load the individual drivers' metadata in the api.
            {
                "displayName": "categories",
                "method": "categories",
                "params": {},  #parameters and default options.
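# (An example of the base request assembled by getBaseRequest appears after this file.)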
            },
            {
                "displayName": "tags",
                "method": "tags",
                "params": {'category_id': '125', 'file_type': 'json', 'realtime_start': '', 'realtime_end': '',
                           'tag_names': '', 'exclude_tag_names': '', 'tag_group_id': '', 'search_text': '',
                           'limit': '', 'offset': '', 'order_by': '', 'sort_order': ''},  #parameters and default options.
            },
        ],
    }
    return(output)

===== example: the shape of the base request =====

# What getBaseRequest assembles for this package, with a placeholder key (a sketch, not a
# real credential):

baseRequest = {
    'url': 'https://api.stlouisfed.org/fred/',               #from config/pkgConfig.yaml
    'params': {'api_key': 'YOUR_KEY', 'file_type': 'JSON'},  #drivers append db-specific params to this
}

===== datapungi_fed/utils.py =====

'''
datapungi_fed.utils
~~~~~~~~~~~~~~~~~~~

This module provides utility functions that are used within datapungi_fed and by users when
they want to update internal configs.
'''

import json
import pkg_resources
import yaml
import os


def getConnectionParameters(connectionParameters={}, userSettings={}):
    '''
    :param userSettings: (optional) dictionary of ``'ApiKeysPath': a path to a json with API keys``
        and ``'ApiKeyLabel': label (key) of the JSON entry containing the key``.
        If userSettings is an empty dictionary (the default), the method will try to load it
        from the saved userSettings.
    output: a dictionary with the user key and the datasource url
    '''
    if not connectionParameters == {}:
        if isinstance(connectionParameters, str):  #in this case, the user only passes a key, no url
            pkgcfgPath = getResourcePath("/config/pkgConfig.yaml")
            with open(pkgcfgPath, 'r') as stream:
                pkgCfg = yaml.safe_load(stream)
            connectionParameters = {'url': pkgCfg['url'], 'key': connectionParameters}
        return(connectionParameters)
    if userSettings == {}:
        userSettings = getUserSettings()
    try:
        storingMethod = userSettings['ApiKeysPath'].split('.')[-1]
        labelName = userSettings['ApiKeyLabel']
        if storingMethod == 'json':
            with open(userSettings['ApiKeysPath']) as jsonFile:
                connectionParameters = (json.load(jsonFile))[labelName]
        elif storingMethod == 'yaml':
            with open(userSettings['ApiKeysPath'], 'r') as stream:
                pkgCfg = yaml.safe_load(stream)
            connectionParameters = pkgCfg[labelName]
        elif storingMethod == 'env':
            #look for an environment variable called something like BEA_url
            url = os.getenv(labelName + '_url')
            if url == None:  #if it can't be found, load the url from the package config
                pkgcfgPath = getResourcePath("/config/pkgConfig.yaml")
                with open(pkgcfgPath, 'r') as stream:
                    pkgCfg = yaml.safe_load(stream)
                url = pkgCfg['url']
            connectionParameters = {'key': os.getenv(labelName), 'url': url}
        return(connectionParameters)
    except:
        print('Could not find dictionary key ' + labelName + ' in \n ' + userSettings['ApiKeysPath'])
        return


def getResourcePath(relativePath, resource_package=__name__):
    '''
    Given a relative path, get its full path;
    eg: the relative path /config/userSettings.json will return the datapungi_fed path + the relative path.
    note: resource_package can be replaced with a package name, eg: 'datapungi_fed'
    '''
    fullPath = pkg_resources.resource_filename(resource_package, relativePath)
    return(fullPath)


def getUserSettings(userSettings={}):
    '''
    Loads the userSettings file.
    '''
    if not userSettings == {}:
        return(userSettings)
    userSettingsPath = getResourcePath('/config/userSettings.json')
    try:
        with open(userSettingsPath) as jsonFile:
            userSettings = json.load(jsonFile)
        return(userSettings)
    except:
        print('.utils.py: Could not open the userSettings: \n ./config/userSettings.json \n returning an empty dictionary')
        return({})


def getPkgConfig():
    '''
    Reads the pkgConfig - eg, the default url.
    '''
    pkgcfgPath = getResourcePath("/config/pkgConfig.yaml")
    with open(pkgcfgPath, 'r') as stream:
        pkgCfg = yaml.safe_load(stream)
    return(pkgCfg)


def setPkgConfig(newUrl):
    '''
    Saves the default url of the api.
    '''
    if not isinstance(newUrl, str):
        print('Provide a string of the API URL')
        return
    pkgcfgPath = getResourcePath("/config/pkgConfig.yaml")
    with open(pkgcfgPath, 'r') as stream:
        pkgCfg = yaml.safe_load(stream)
    pkgCfg['url'] = newUrl
    with open(pkgcfgPath, 'w') as outfile:
        yaml.dump(pkgCfg, outfile, default_flow_style=False)
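# For illustration: with the shipped userSettings ({"ApiKeysPath": "env", "ApiKeyLabel": "API_KEY_FED"}),
# storingMethod is 'env', so the key is read from the API_KEY_FED environment variable and the url
# from API_KEY_FED_url when set, falling back to the url in config/pkgConfig.yaml.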
def setUserSettings(newPath):  #TODO: check if still valid
    '''
    Sets the api key path in the package config file.
    eg:
        import datapungi_fed as dp
        dp.utils.setUserSettings('myPath')
    '''
    userSettingsPath = getResourcePath('/config/userSettings.json')
    try:
        with open(userSettingsPath) as jsonFile:
            config = json.load(jsonFile)
    except:
        print('Could not open the configuration file: \n datapungi_fed/config/userSettings.json')
        return
    config['ApiKeysPath'] = newPath
    try:
        with open(userSettingsPath, 'w') as jsonFile:
            json.dump(config, jsonFile)
        print('Path to the API keys updated!  New path: \n' + config['ApiKeysPath'])
    except:
        print('Could not save the configuration to file: \n datapungi_fed/config/userSettings.json \n Path to the API key not updated')


def setKeyName(newName):  #TODO: check if still valid
    '''
    Sets the api key name in the package config file.
    eg:
        import datapungi_fed as dp
        dp.utils.setKeyName('BEA_Secrete')
    '''
    userSettingsPath = getResourcePath('/config/userSettings.json')
    try:
        with open(userSettingsPath) as jsonFile:
            config = json.load(jsonFile)
    except:
        print('Could not open the configuration file: \n datapungi_fed/config/userSettings.json')
        return
    config["ApiKeyLabel"] = newName
    try:
        with open(userSettingsPath, 'w') as jsonFile:
            json.dump(config, jsonFile)
        print('Name of the API key updated!  New name: \n' + config["ApiKeyLabel"])
    except:
        print('Could not save the configuration to file: \n datapungi_fed/config/userSettings.json \n API key name not updated')


def setTestFolder(newTestsPath):
    userSettingsPath = getResourcePath('/config/userSettings.json')
    try:
        with open(userSettingsPath) as jsonFile:
            config = json.load(jsonFile)
    except:
        print('Could not open the configuration file: \n datapungi_fed/config/userSettings.json')
        return
    config['TestsOutputPath'] = newTestsPath
    try:
        with open(userSettingsPath, 'w') as jsonFile:
            json.dump(config, jsonFile)
        print('Path to the tests output folder updated!  New path: \n' + config['TestsOutputPath'])
    except:
        print('Could not save the configuration to file: \n datapungi_fed/config/userSettings.json \n Path to the tests output not updated')


if __name__ == '__main__':
    setTestFolder('U:/Tests')
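===== example: pointing the package at a key file =====

# A sketch with made-up path and label values:

import datapungi_fed as dp

dp.utils.setUserSettings('C:/MyKeys/apiKeys.yaml')  #path to the file holding the keys (made up)
dp.utils.setKeyName('FRED')                         #label of the entry containing the key and url
print(dp.utils.getUserSettings())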
===== datapungi_fed/.vscode/settings.json =====

{
    "python.pythonPath": "C:\\Users\\jjott\\AppData\\Local\\Programs\\Python\\Python37\\python.exe"
}

===== datapungi_fed/config/__init__.py =====

===== datapungi_fed/config/datasetlist.yaml =====

- api: FRED
  group: Categories
  default query: category
  datasets:
    - { short name: category, database: category, json key: categories,
        description: Get a category.,
        parameters: [api_key, file_type, category_id] }
    - { short name: children, database: category/children, json key: categories,
        description: Get the child categories for a specified parent category.,
        parameters: [api_key, file_type, category_id, realtime_start, realtime_end] }
    - { short name: related, database: category/related, json key: categories,
        description: Get the related categories for a category.,
        parameters: [api_key, file_type, category_id, realtime_start, realtime_end] }
    - { short name: series, database: category/series, json key: seriess,
        description: Get the series in a category.,
        parameters: [api_key, file_type, category_id, realtime_start, realtime_end, limit, offset, order_by, sort_order, filter_variable, filter_value, tag_names, exclude_tag_names] }
    - { short name: tags, database: category/tags, json key: tags,
        description: Get the tags for a category.,
        parameters: [api_key, file_type, category_id, realtime_start, realtime_end, tag_names, tag_group_id, search_text, limit, offset, order_by, sort_order] }
    - { short name: related_tags, database: category/related_tags, json key: tags,
        description: Get the related tags for a category.,
        parameters: [api_key, file_type, category_id, realtime_start, realtime_end, tag_names, exclude_tag_names, tag_group_id, search_text, limit, offset, order_by] }
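# Every dataset entry above and below follows the same schema:
#   short name  - the key users call, eg data.categories['children'](...)
#   database    - the url suffix appended to the FRED base url
#   json key    - the field of the json response holding the data table
#   parameters  - accepted query parameters; api_key and file_type are stripped by
#                 driverCore since the base request already supplies them.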
- api: FRED
  group: Releases
  default query: releases
  datasets:
    - { short name: releases, database: releases, json key: releases,
        description: Get all releases of economic data.,
        parameters: [api_key, file_type, realtime_start, realtime_end, limit, offset, order_by, sort_order] }
    - { short name: releases/dates, database: releases/dates, json key: release_dates,
        description: Get release dates for all releases of economic data.,
        parameters: [api_key, file_type, realtime_start, realtime_end, limit, offset, order_by, sort_order, include_release_dates_with_no_data] }
    - { short name: release, database: release, json key: releases,
        description: Get a release of economic data.,
        parameters: [api_key, file_type, release_id, realtime_start, realtime_end] }
    - { short name: release/dates, database: release/dates, json key: release_dates,
        description: Get release dates for a release of economic data.,
        parameters: [api_key, file_type, release_id, realtime_start, realtime_end, limit, offset, sort_order, include_release_dates_with_no_data] }
    - { short name: release/series, database: release/series, json key: seriess,
        description: Get the series on a release of economic data.,
        parameters: [api_key, file_type, release_id, realtime_start, realtime_end, limit, offset, order_by, sort_order, filter_variable, filter_value, tag_names, exclude_tag_names] }
    - { short name: release/sources, database: release/sources, json key: sources,
        description: Get the sources for a release of economic data.,
        parameters: [api_key, file_type, release_id, realtime_start, realtime_end] }
    - { short name: release/tags, database: release/tags, json key: tags,
        description: Get the tags for a release.,
        parameters: [api_key, file_type, release_id, realtime_start, realtime_end, tag_names, tag_group_id, search_text, limit, offset, order_by, sort_order] }
    - { short name: release/related_tags, database: release/related_tags, json key: tags,
        description: Get the related tags for a release.,
        parameters: [api_key, file_type, release_id, realtime_start, realtime_end, tag_names, exclude_tag_names, tag_group_id, search_text, limit, offset, order_by, sort_order] }
    - { short name: release/tables, database: release/tables, json key: elements,
        description: Get the release tables for a given release.,
        parameters: [api_key, file_type, release_id, element_id, include_observation_values, observation_date] }

- api: FRED
  group: Series
  default query: observations
  datasets:
    - { short name: series, database: series, json key: seriess,
        description: Get an economic data series.,
        parameters: [api_key, file_type, series_id, realtime_start, realtime_end] }
    - { short name: categories, database: series/categories, json key: categories,
        description: Get the categories for an economic data series.,
        parameters: [api_key, file_type, series_id, realtime_start, realtime_end] }
    - { short name: observations, database: series/observations, json key: observations,
        description: Get the observations or data values for an economic data series.,
        parameters: [api_key, file_type, series_id, realtime_start, realtime_end, limit, offset, sort_order, observation_start, observation_end, units, frequency, aggregation_method, output_type, vintage_dates] }
    - { short name: release, database: series/release, json key: releases,
        description: Get the release for an economic data series.,
        parameters: [api_key, file_type, series_id, realtime_start, realtime_end] }
    - { short name: search, database: series/search, json key: seriess,
        description: Get economic data series that match keywords.,
        parameters: [api_key, file_type, search_text, search_type, realtime_start, realtime_end, limit, offset, order_by, sort_order, filter_variable, filter_value, tag_names, exclude_tag_names] }
    - { short name: search/tags, database: series/search/tags, json key: tags,
        description: Get the tags for a series search.,
        parameters: [api_key, file_type, series_search_text, realtime_start, realtime_end, tag_names, tag_group_id, tag_search_text, limit, offset, order_by, sort_order] }
    - { short name: search/related_tags, database: series/search/related_tags, json key: tags,
        description: Get the related tags for a series search.,
        parameters: [api_key, file_type, series_search_text, realtime_start, realtime_end, tag_names, exclude_tag_names, tag_group_id, tag_search_text, limit, offset, order_by, sort_order] }
    - { short name: tags, database: series/tags, json key: tags,
        description: Get the tags for an economic data series.,
        parameters: [api_key, file_type, series_id, realtime_start, realtime_end, order_by, sort_order] }
    - { short name: updates, database: series/updates, json key: seriess,
        description: Get economic data series sorted by when observations were updated on the FRED® server.,
        parameters: [api_key, file_type, realtime_start, realtime_end, limit, offset, filter_value, start_time, end_time] }
    - { short name: vintagedates, database: series/vintagedates, json key: vintage_dates,
        description: Get the dates in history when a series' data values were revised or new data values were released.,
        parameters: [api_key, file_type, series_id, realtime_start, realtime_end, limit, offset, sort_order] }
- api: FRED
  group: Sources
  default query: source
  datasets:
    - { short name: sources, database: sources, json key: sources,
        description: Get all sources of economic data.,
        parameters: [api_key, file_type, realtime_start, realtime_end, limit, offset, order_by, sort_order] }
    - { short name: source, database: source, json key: sources,
        description: Get a source of economic data.,
        parameters: [api_key, file_type, source_id, realtime_start, realtime_end] }
    - { short name: source/releases, database: source/releases, json key: releases,
        description: Get the releases for a source.,
        parameters: [api_key, file_type, source_id, realtime_start, realtime_end, limit, offset, order_by, sort_order] }

- api: FRED
  group: Tags
  default query: related_tags
  datasets:
    - { short name: tags, database: tags, json key: tags,
        description: "Get all tags, search for tags, or get tags by name.",
        parameters: [api_key, file_type, realtime_start, realtime_end, tag_names, tag_group_id, search_text, limit, offset, order_by, sort_order] }
    - { short name: related_tags, database: related_tags, json key: tags,
        description: "Get the related tags for one or more tags.",
        parameters: [api_key, file_type, tag_names, exclude_tag_names, tag_group_id, search_text, realtime_start, realtime_end, limit, offset, order_by, sort_order] }
    - { short name: tags/series, database: tags/series, json key: seriess,
        description: Get the series matching tags.,
        parameters: [api_key, file_type, tag_names, exclude_tag_names, realtime_start, realtime_end, limit, offset, order_by, sort_order] }

===== datapungi_fed/config/pkgConfig.yaml =====

url: https://api.stlouisfed.org/fred/

===== datapungi_fed/config/userSettings.json =====

{"ApiKeysPath": "env", "ApiKeyLabel": "API_KEY_FED", "ResultFormat": "json", "TestsOutputPath": "C:/Tests/"}

===== datapungi_fed/tests/__init__.py =====

from datapungi_fed.tests.main import runTests

===== datapungi_fed/tests/conftest.py =====

# content of conftest.py
import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--cmdopt", action="store", default="", help="enter API key"
    )


@pytest.fixture
def cmdopt(request):
    return request.config.getoption("--cmdopt")

===== datapungi_fed/tests/main.py =====

import subprocess
import os

from datapungi_fed.utils import getUserSettings


def runTests(outputPath='', testsPath='', verbose=True):
    if not testsPath:
        testsPath = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")
    print('**************************** \nWill run tests in: ' + testsPath)
    if not outputPath:
        outputPath = "U:/"
        try:
            settingsFile = getUserSettings()
            outputPath = settingsFile['TestsOutputPath']
        except:
            print("Could not load TestsOutputPath from the user settings.  Perhaps run utils.setTestFolder(FilePath)")
    subprocess.Popen('pytest ' + testsPath + ' --html=' + outputPath + 'datapungi_fed_Tests.html --self-contained-html')
    if verbose:
        print('Tests will be saved in ' + outputPath + 'datapungi_fed_Tests.html \n****************************')


if __name__ == '__main__':
    from sys import argv
    import subprocess
    import os
    runTests()
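===== example: running the test suite =====

# The API key is read from pytest's --cmdopt option (see conftest.py above) or from the saved
# settings when omitted; from a shell: pytest datapungi_fed/tests --cmdopt YOUR_KEY

import datapungi_fed as dpf

dpf.tests.runTests()  #runs pytest on the tests folder and saves an html report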
===== datapungi_fed/tests/test_drivers.py =====

import datapungi_fed as dp
import time
import pandas as pd
import os


def executeCode(stringIn):
    '''
    Auxiliary function for tests: gets the requests code as a string and tries to execute it.
    '''
    try:
        exec(stringIn + '\n')
        return(dict(codeRun=True, codeOutput=locals()['df_output']))  #try to output the dataframe called df_output
    except:
        try:
            exec(stringIn)  #if there is no dataframe called df_output, at least check that the code executes
            return(dict(codeRun=True, codeOutput=pd.DataFrame([])))
        except:
            return(dict(codeRun=False, codeOutput=pd.DataFrame([])))


#start the driver - used by all tests
def startDriver(cmdopt):
    if not cmdopt == "":
        connectionParameters = {"key": cmdopt, "url": ""}
    else:
        connectionParameters = {}
    data = dp.data(connectionParameters)
    return(data)


def test_startDriver(cmdopt):
    data = startDriver(cmdopt)
    assert data
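# Every test below follows the same pattern: call a driver with verbose=True, which returns a
# dict with 'dataFrame' (the cleaned pandas table), 'request' (the raw requests response), and
# 'code' (a runnable snippet reproducing the query); the test re-executes that snippet and
# checks that it rebuilds the same dataframe.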
###########################################################################################################
### Tests of the categories dbgroup

def test_categories(cmdopt):
    data = startDriver(cmdopt)
    driver = data.categories(125, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200                #test if the connection was established
    assert not driver['dataFrame'].empty                       #the cleaned-up output is not empty
    assert execCode['codeRun']                                 #try to execute the returned code
    assert execCode['codeOutput'].equals(driver['dataFrame'])  #the code output equals the driver output


def test_categoriesLong(cmdopt):
    data = startDriver(cmdopt)
    driver = data.categories['category'](125, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_categoriesChildren(cmdopt):
    data = startDriver(cmdopt)
    driver = data.categories['children'](13, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_categoriesRelated(cmdopt):
    data = startDriver(cmdopt)
    driver = data.categories['related'](32073, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_categoriesSeries(cmdopt):
    data = startDriver(cmdopt)
    driver = data.categories['series'](125, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_categoriesTags(cmdopt):
    data = startDriver(cmdopt)
    driver = data.categories['tags'](125, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_categoriesRelatedTags(cmdopt):
    data = startDriver(cmdopt)
    driver = data.categories['related_tags'](125, tag_names="services;quarterly", verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])
###########################################################################################################
### Tests of the releases dbgroup

def test_releases(cmdopt):
    data = startDriver(cmdopt)
    driver = data.releases(verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_releasesLong(cmdopt):
    data = startDriver(cmdopt)
    driver = data.releases['releases'](verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_releasesDates(cmdopt):
    data = startDriver(cmdopt)
    driver = data.releases['release/dates'](release_id=53, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_releasesRelease(cmdopt):
    data = startDriver(cmdopt)
    driver = data.releases['release'](release_id=53, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_releasesReleaseDates(cmdopt):
    data = startDriver(cmdopt)
    driver = data.releases['release/dates'](release_id=53, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_releasesReleaseSeries(cmdopt):
    data = startDriver(cmdopt)
    driver = data.releases['release/series'](release_id=53, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_releasesReleaseSources(cmdopt):
    data = startDriver(cmdopt)
    driver = data.releases['release/sources'](release_id=53, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_releasesReleaseTags(cmdopt):
    data = startDriver(cmdopt)
    driver = data.releases['release/tags'](release_id=53, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_releasesReleaseRelatedTags(cmdopt):
    data = startDriver(cmdopt)
    driver = data.releases['release/related_tags'](release_id='86', tag_names='sa;foreign', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_releasesReleaseTables(cmdopt):
    data = startDriver(cmdopt)
    driver = data.releases['release/tables'](release_id=53, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])
###########################################################################################################
## Test Series

def test_series(cmdopt):
    data = startDriver(cmdopt)
    driver = data.series('gdp', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_seriesSeries(cmdopt):
    data = startDriver(cmdopt)
    driver = data.series['series']('GDP', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_seriesCategories(cmdopt):
    data = startDriver(cmdopt)
    driver = data.series['categories']('EXJPUS', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_seriesObservations(cmdopt):  #seriesLong
    data = startDriver(cmdopt)
    driver = data.series['observations']('GNP', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_seriesRelease(cmdopt):
    data = startDriver(cmdopt)
    driver = data.series['release']('IRA', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_seriesSearch(cmdopt):
    data = startDriver(cmdopt)
    driver = data.series['search'](search_text='monetary+service+index', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_seriesSearchTags(cmdopt):
    data = startDriver(cmdopt)
    driver = data.series['search/tags'](series_search_text='monetary+service+index', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_seriesSearchRelatedTags(cmdopt):
    data = startDriver(cmdopt)
    driver = data.series['search/related_tags'](series_search_text='mortgage+rate', tag_names='30-year;frb', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_seriesTags(cmdopt):
    data = startDriver(cmdopt)
    driver = data.series['tags'](series_id='STLFSI', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])
def test_seriesUpdates(cmdopt):
    data = startDriver(cmdopt)
    driver = data.series['updates'](verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_seriesVintagedates(cmdopt):
    data = startDriver(cmdopt)
    driver = data.series['vintagedates']('GNPCA', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


###########################################################################################################
## Test Tags

def test_tags(cmdopt):
    data = startDriver(cmdopt)
    driver = data.tags(tag_names='monetary+aggregates;weekly', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']
    assert execCode['codeOutput'].equals(driver['dataFrame'])


def test_tagsLong(cmdopt):
    data = startDriver(cmdopt)
    driver = data.tags['tags'](tag_names='monetary+aggregates;weekly', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']


def test_tagsRelated(cmdopt):
    data = startDriver(cmdopt)
    driver = data.tags['related_tags'](tag_names='monetary+aggregates;weekly', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']


def test_tagsSeries(cmdopt):
    data = startDriver(cmdopt)
    driver = data.tags['tags/series'](tag_names='slovenia;food;oecd', verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200
    assert not driver['dataFrame'].empty
    assert execCode['codeRun']


if __name__ == '__main__':
    test_tags('')

===== datapungi_fed/tests/test_driversCI.py =====

import datapungi_fed as dp
import time
import pandas as pd
import os


def executeCode(stringIn):
    '''
    Auxiliary function for tests: gets the requests code as a string and tries to execute it.
    '''
    try:
        exec(stringIn + '\n')
        return(dict(codeRun=True, codeOutput=locals()['df_output']))  #try to output the dataframe called df_output
    except:
        try:
            exec(stringIn)  #if there is no dataframe called df_output, at least check that the code executes
            return(dict(codeRun=True, codeOutput=pd.DataFrame([])))
        except:
            return(dict(codeRun=False, codeOutput=pd.DataFrame([])))


#start the driver - used by all tests
def startDriver(cmdopt):
    global data
    if not cmdopt == "":
        connectionParameters = {"key": cmdopt, "url": "https://api.stlouisfed.org/fred/"}
    else:
        connectionParameters = {}
    data = dp.data(connectionParameters)
    return(data)


def test_categories(cmdopt):
    data = startDriver(cmdopt)
    driver = data.categories(125, verbose=True)
    execCode = executeCode(driver['code'])
    assert driver['request'].status_code == 200                #test if the connection was established
    assert not driver['dataFrame'].empty                       #the cleaned-up output is not empty
    assert execCode['codeRun']                                 #try to execute the returned code
    assert execCode['codeOutput'].equals(driver['dataFrame'])  #the code output equals the driver output


if __name__ == '__main__':
    test_categories('')
===== datapungi_fed-0.1.3.dist-info/LICENSE =====

MIT License

Copyright (c) 2019 James Otterson

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.