# NOTE(review): this chunk was recovered from a corrupted archive dump.  The
# AuthToken class, the MASTERY_DIMENSIONS_REGEX constant, and the `search`
# generator are truncated/garbled in this view; they are referenced below as
# free names and must be provided by the portions of the file not shown here.
# Only the definitions that are fully visible are reproduced.


class AuthTokenManager(object):
    """Fetches and lazily refreshes OAuth2 client-credential tokens.

    Parameters:
        api_key: client id, used both for the OAuth2 token exchange and as
            the ``Api-Key`` request header value.
        client_secret: OAuth2 client secret.
        auth_token: optionally, a previously-obtained AuthToken; a fresh one
            is fetched on demand when this is missing or expired.
    """

    def __init__(self, api_key, client_secret, auth_token=None):
        self.api_key = api_key
        self.client_secret = client_secret
        self.auth_token = auth_token

    def _fetch_token(self):
        """POST a client-credentials grant and wrap the JSON response.

        Raises a requests HTTPError on any non-2xx response.
        """
        res = requests.post(gen_url('oauth2', 'token'), data={
            'grant_type': GRANT_TYPE_CLIENT_CREDENTIALS,
            'client_id': self.api_key,
            'client_secret': self.client_secret,
        })
        res.raise_for_status()
        return AuthToken.from_dict(res.json())

    @property
    def token(self):
        """
        A wrapper around AuthTokenManager.auth_token which will always
        return a currently-valid token (or raise a requests Exception)
        """
        if not self.auth_token or not self.auth_token.valid:
            self.auth_token = self._fetch_token()
        return self.auth_token

    def request_headers(self):
        """Headers needed to authenticate an API call.

        Note: reading ``self.token`` may trigger a network round-trip to
        refresh an expired token.
        """
        return {
            'Api-Key': self.api_key,
            'Authorization': 'Bearer {}'.format(self.token),
        }


def flex_auth(api_key=None, client_secret=None, auth_token_manager=None):
    """
    Takes either an AuthTokenManager (which is passed through), or an API
    Key and Client Secret (which is turned into an AuthTokenManager).

    Exists so endpoint wrappers can take either an ATM or raw creds at the
    call validation level, but only need to handle ATMs in the "real"
    functionality. This entire flow basically exists to make the REPL and
    one-off calls less boilerplatey.

    Raises:
        ValueError: when neither an AuthTokenManager nor both raw
            credentials are supplied.
    """
    if auth_token_manager:
        return auth_token_manager
    if not (api_key and client_secret):
        raise ValueError('Either auth_token_manager or api_key+client_secret required')
    return AuthTokenManager(api_key, client_secret)


def format_image(image):
    """Normalize a raw image payload dict in place and return it.

    Parses ``date_created`` into a pendulum datetime and flattens
    ``keywords`` to bare strings (preserving the originals under
    ``raw_keywords``).
    """
    if image.get('date_created') is not None:
        image['date_created'] = pendulum.parse(image['date_created'])
    if image.get('keywords') is not None:
        # Keep the untouched payload around, then flatten to plain strings.
        image['raw_keywords'] = image['keywords']
        image['keywords'] = [kw['text'] for kw in image['keywords']]
    return image


def format_video(video):
    """Normalize a raw video payload dict in place and return it.

    Parses ``date_created`` (pendulum datetime), ``clip_length`` (pendulum
    Duration), flattens ``keywords`` (originals kept as ``raw_keywords``),
    and extracts integer dimensions from ``mastered_to``.
    """
    if video.get('date_created') is not None:
        video['date_created'] = pendulum.parse(video['date_created'])
    if video.get('clip_length') is not None:
        fields = ('days', 'hours', 'minutes', 'seconds', 'frames')
        cl = [int(x) for x in video['clip_length'].split(':')]
        # Getty durations are provided as strings that can omit zeroed
        # leading fields. This forces those missing fields to be parsed as
        # zero, avoiding an IndexError in the old implementation which
        # blindly used `cl[2]` and similar
        #
        # https://stackoverflow.com/a/13085898
        video['clip_length'] = pendulum.duration(
            **{
                k: v for k, v in zip_longest(
                    reversed(fields),
                    reversed(cl),
                    fillvalue=0,
                )
                # discard frames - pendulum Duration objects obviously
                # don't support them. Possible TODO would be to calculate
                # milliseconds off this value and the reported FPS of the
                # video
                #
                # Also remove any improperly parsed fields (somehow the
                # duration string had more sections than we can handle,
                # which should basically never happen)
                if k != 'frames' and k != 0
            }
        )
    if video.get('keywords') is not None:
        video['raw_keywords'] = video['keywords']
        video['keywords'] = [kw['text'] for kw in video['keywords']]
    if video.get('mastered_to') is not None:
        # BUGFIX: the original called `.groupdict().viewitems()`, which only
        # exists on Python 2 dicts; `.items()` yields the same (key, value)
        # pairs on both Python 2 and 3.
        video['parsed_dimensions'] = {
            k: int(v) for k, v in re.search(
                pattern=MASTERY_DIMENSIONS_REGEX,
                string=video['mastered_to'],
            ).groupdict().items()
        }
    return video


def all_videos(*args, **kwargs):
    """Search all videos (no search_type restriction)."""
    kwargs['asset_type'] = 'videos'
    return search(*args, **kwargs)


def creative_videos(*args, **kwargs):
    """Search creative (non-editorial) videos."""
    kwargs['search_type'] = 'creative'
    kwargs['asset_type'] = 'videos'
    return search(*args, **kwargs)


def editorial_videos(*args, **kwargs):
    """Search editorial videos."""
    kwargs['search_type'] = 'editorial'
    kwargs['asset_type'] = 'videos'
    return search(*args, **kwargs)


def all_images(*args, **kwargs):
    """Search all images (no search_type restriction)."""
    kwargs['asset_type'] = 'images'
    return search(*args, **kwargs)


def creative_images(*args, **kwargs):
    """Search creative (non-editorial) images."""
    kwargs['search_type'] = 'creative'
    kwargs['asset_type'] = 'images'
    return search(*args, **kwargs)


def editorial_images(*args, **kwargs):
    """Search editorial images."""
    kwargs['search_type'] = 'editorial'
    kwargs['asset_type'] = 'images'
    return search(*args, **kwargs)