PKbF\senza/docker.pyimport requests def docker_image_exists(docker_image: str) -> bool: """ Check whether the docker image exists by calling the Docker registry REST API """ parts = docker_image.split('/') registry = parts[0] repo = '/'.join(parts[1:]) repo, tag = repo.split(':') for scheme in 'https', 'http': try: url = '{scheme}://{registry}/v1/repositories/{repo}/tags'.format(scheme=scheme, registry=registry, repo=repo) r = requests.get(url, timeout=5) result = r.json() return tag in result except: pass return False PKlGjsenza/utils.pyimport re import pystache def named_value(d): return next(iter(d.items())) def ensure_keys(dict, *keys): if len(keys) == 0: return dict else: first, rest = keys[0], keys[1:] if first not in dict: dict[first] = {} dict[first] = ensure_keys(dict[first], *rest) return dict def camel_case_to_underscore(name): ''' >>> camel_case_to_underscore('CamelCaseToUnderscore') 'camel_case_to_underscore' ''' s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() def pystache_render(*args, **kwargs): render = pystache.Renderer(missing_tags='strict') return render.render(*args, **kwargs) PK}[G( ´´ senza/cli.py#!/usr/bin/env python3 import calendar import collections import configparser import datetime import functools import importlib import ipaddress import os import re import sys import json from urllib.error import URLError import dns.resolver import time from subprocess import call import click from clickclick import AliasedGroup, Action, choice, info, FloatRange, OutputFormat, fatal_error from clickclick.console import print_table import requests import yaml import base64 import boto3 from botocore.exceptions import NoCredentialsError, ClientError from .aws import parse_time, get_required_capabilities, resolve_topic_arn, get_stacks, StackReference, matches_any, \ get_account_id, get_account_alias, get_tag from .components import get_component, evaluate_template import senza from urllib.request 
import urlopen from urllib.parse import quote from .traffic import change_version_traffic, print_version_traffic, get_records from .utils import named_value, camel_case_to_underscore, pystache_render, ensure_keys from pprint import pformat CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) STYLES = { 'RUNNING': {'fg': 'green'}, 'TERMINATED': {'fg': 'red'}, 'DELETE_COMPLETE': {'fg': 'red'}, 'ROLLBACK_COMPLETE': {'fg': 'red'}, 'CREATE_COMPLETE': {'fg': 'green'}, 'CREATE_FAILED': {'fg': 'red'}, 'PENDING': {'fg': 'yellow', 'bold': True}, 'CREATE_IN_PROGRESS': {'fg': 'yellow', 'bold': True}, 'DELETE_IN_PROGRESS': {'fg': 'red', 'bold': True}, 'STOPPING': {'fg': 'red'}, 'STOPPED': {'fg': 'red', 'bold': True}, 'SHUTTING_DOWN': {'fg': 'red', 'bold': True}, 'ROLLBACK_IN_PROGRESS': {'fg': 'red', 'bold': True}, 'UPDATE_COMPLETE': {'fg': 'green'}, 'UPDATE_ROLLBACK_IN_PROGRESS': {'fg': 'red', 'bold': True}, 'UPDATE_IN_PROGRESS': {'fg': 'yellow', 'bold': True}, 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS': {'fg': 'red', 'bold': True}, 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS': {'fg': 'yellow', 'bold': True}, 'UPDATE_FAILED': {'fg': 'red'}, 'UPDATE_ROLLBACK_COMPLETE': {'fg': 'red'}, 'IN_SERVICE': {'fg': 'green'}, 'OUT_OF_SERVICE': {'fg': 'red'}, 'OK': {'fg': 'green'}, 'ERROR': {'fg': 'red'}, } TITLES = { 'creation_time': 'Created', 'LogicalResourceId': 'Resource ID', 'launch_time': 'Launched', 'ResourceStatus': 'Status', 'ResourceStatusReason': 'Status Reason', 'lb_status': 'LB Status', 'private_ip': 'Private IP', 'public_ip': 'Public IP', 'resource_id': 'Resource ID', 'instance_id': 'Instance ID', 'version': 'Ver.', 'total_instances': 'Inst.#', 'running_instances': 'Running', 'docker_source': 'Docker Image Source', 'healthy_instances': 'Healthy', 'http_status': 'HTTP', 'main_dns': 'Main DNS', 'id': 'ID', 'ImageId': 'Image ID', 'OwnerId': 'Owner' } MAX_COLUMN_WIDTHS = { 'description': 50, 'stacks': 20, 'ResourceStatusReason': 50 } def print_json(data, output=None): if 
output == 'yaml': parsed_data = yaml.safe_load(data) print(yaml.safe_dump(parsed_data, indent=4, default_flow_style=False)) else: print(data) class DefinitionParamType(click.ParamType): name = 'definition' def convert(self, value, param, ctx): if isinstance(value, str): try: url = value if '://' in value else 'file://{}'.format(quote(os.path.abspath(value))) # if '://' not in value: # url = 'file://{}'.format(quote(os.path.abspath(value))) response = urlopen(url) data = yaml.safe_load(response.read()) except URLError: self.fail('"{}" not found'.format(value), param, ctx) else: data = value for key in ['SenzaInfo']: if 'SenzaInfo' not in data: self.fail('"{}" entry is missing in YAML file "{}"'.format(key, value), param, ctx) return data class KeyValParamType(click.ParamType): ''' >>> KeyValParamType().convert(('a', 'b'), None, None) ('a', 'b') ''' name = 'key_val' def convert(self, value, param, ctx): if isinstance(value, str): try: key, val = value.split('=', 1) except: self.fail('invalid key value parameter "{}" (must be KEY=VAL)'.format(value)) key_val = (key, val) else: key_val = value return key_val region_option = click.option('--region', envvar='AWS_DEFAULT_REGION', metavar='AWS_REGION_ID', help='AWS region ID (e.g. 
eu-west-1)') output_option = click.option('-o', '--output', type=click.Choice(['text', 'json', 'tsv']), default='text', help='Use alternative output format') json_output_option = click.option('-o', '--output', type=click.Choice(['json', 'yaml']), default='json', help='Use alternative output format') watch_option = click.option('-W', is_flag=True, help='Auto update the screen every 2 seconds') watchrefresh_option = click.option('-w', '--watch', type=click.IntRange(1, 300), metavar='SECS', help='Auto update the screen every X seconds') def watching(w: bool, watch: int): if w and not watch: watch = 2 if watch: click.clear() yield 0 if watch: while True: time.sleep(watch) click.clear() yield 0 # from AWS docs: # Stack name must contain only alphanumeric characters (case sensitive) # and start with an alpha character. Maximum length of the name is 255 characters. STACK_NAME_PATTERN = re.compile(r'^[a-zA-Z][a-zA-Z0-9-]*$') VERSION_PATTERN = re.compile(r'^[a-zA-Z0-9]+$') def validate_version(ctx, param, value): if not VERSION_PATTERN.match(value): raise click.BadParameter('Version must satisfy regular expression pattern "[a-zA-Z0-9]+"') return value DEFINITION = DefinitionParamType() KEY_VAL = KeyValParamType() BASE_TEMPLATE = { 'AWSTemplateFormatVersion': '2010-09-09' } def print_version(ctx, param, value): if not value or ctx.resilient_parsing: return click.echo('Senza {}'.format(senza.__version__)) ctx.exit() def evaluate(definition, args, account_info, force: bool): # extract Senza* meta information info = definition.pop("SenzaInfo") info["StackVersion"] = args.version # replace Arguments and AccountInfo Variabales in info section info = yaml.load(evaluate_template(yaml.dump(info), {}, {}, args, account_info)) # add info as mappings # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html definition = ensure_keys(definition, "Mappings", "Senza", "Info") definition["Mappings"]["Senza"]["Info"] = info template = 
yaml.dump(definition, default_flow_style=False) definition = evaluate_template(template, info, [], args, account_info) definition = yaml.load(definition) components = definition.pop("SenzaComponents", []) # merge base template with definition BASE_TEMPLATE.update(definition) definition = BASE_TEMPLATE # evaluate all components for component in components: componentname, configuration = named_value(component) configuration["Name"] = componentname componenttype = configuration["Type"] componentfn = get_component(componenttype) if not componentfn: raise click.UsageError('Component "{}" does not exist'.format(componenttype)) definition = componentfn(definition, configuration, args, info, force, account_info) # throw executed template to templating engine and provide all information for substitutions template = yaml.dump(definition, default_flow_style=False) definition = evaluate_template(template, info, components, args, account_info) definition = yaml.load(definition) return definition def handle_exceptions(func): @functools.wraps(func) def wrapper(): try: func() except NoCredentialsError as e: sys.stdout.flush() sys.stderr.write('No AWS credentials found. ' + 'Use the "mai" command line tool to get a temporary access key\n') sys.stderr.write('or manually configure either ~/.aws/credentials ' + 'or AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY.\n') sys.exit(1) except ClientError as e: sys.stdout.flush() if is_credentials_expired_error(e): sys.stderr.write('AWS credentials have expired. 
' + 'Use the "mai" command line tool to get a new temporary access key.\n') sys.exit(1) else: raise except: # Catch All sys.stdout.flush() raise return wrapper def is_credentials_expired_error(e: ClientError) -> bool: return e.response['Error']['Code'] in ['ExpiredToken', 'RequestExpired'] @click.group(cls=AliasedGroup, context_settings=CONTEXT_SETTINGS) @click.option('-V', '--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True, help='Print the current version number and exit.') def cli(): pass class TemplateArguments: def __init__(self, **kwargs): for key, val in kwargs.items(): setattr(self, key, val) class AccountArguments: ''' >>> test = AccountArguments('blubber', ... AccountID='123456', ... AccountAlias='testdummy', ... Domain='test.example.org.', ... TeamID='superteam') >>> test.AccountID '123456' >>> test.AccountAlias 'testdummy' >>> test.TeamID 'superteam' >>> test.Domain 'test.example.org' >>> test.Region 'blubber' >>> test.blubber Traceback (most recent call last): File "", line 1, in AttributeError: 'AccountArguments' object has no attribute 'blubber' ''' def __init__(self, region, **kwargs): setattr(self, '__Region', region) for key, val in kwargs.items(): setattr(self, '__' + key, val) @property def AccountID(self): attr = getattr(self, '__AccountID', None) if attr is None: accountid = get_account_id() setattr(self, '__AccountID', accountid) return accountid return attr @property def AccountAlias(self): attr = getattr(self, '__AccountAlias', None) if attr is None: accountalias = get_account_alias() setattr(self, '__AccountAlias', accountalias) return accountalias return attr @property def Region(self): return getattr(self, '__Region', None) @property def Domain(self): attr = getattr(self, '__Domain', None) if attr is None: conn = boto3.client('route53') domainlist = conn.list_hosted_zones()['HostedZones'] if len(domainlist) == 0: raise AttributeError('No Domain configured') elif len(domainlist) > 1: domain = 
choice('Please select the domain', sorted(domain['Name'].rstrip('.') for domain in domainlist)) else: domain = domainlist[0]['Name'].rstrip('.') domain = conn.list_hosted_zones()['HostedZones'][0]['Name'] setattr(self, '__Domain', domain) return domain return attr.rstrip('.') @property def TeamID(self): attr = getattr(self, '__TeamID', None) if attr is None: team_id = get_account_alias().split('-', maxsplit=1)[-1] setattr(self, '__TeamID', team_id) return team_id return attr @property def VpcID(self): attr = getattr(self, '__VpcID', None) if attr is None: ec2 = boto3.resource('ec2', self.Region) for vpc in ec2.vpcs.all(): # don't use the list from blow. .all() use a internal pageing! if vpc.is_default: setattr(self, '__VpcID', vpc.vpc_id) return vpc.vpc_id if getattr(self, '__VpcID', None) is None: vpclist = list(ec2.vpcs.all()) if len(vpclist) == 1: # Use the only one VPC if no default VPC found setattr(self, '__VpcID', vpclist[0].vpc_id) return vpclist[0].vpc_id elif len(vpclist) > 1: raise AttributeError('Multiple VPC only supportet with one default VPC!') else: raise AttributeError('Can\'t find any VPC!') return attr def parse_args(input, region, version, parameter, account_info): paras = {} defaults = collections.OrderedDict() parameterlist = [] # process positional parameters first seen_keyword = False for i, param in enumerate(input['SenzaInfo'].get('Parameters', [])): for key, config in param.items(): parameterlist.append(key) # collect all allowed keys and default values regardless paras[key] = None defaults[key] = config.get('Default', None) if defaults[key] is not None: defaults[key] = pystache_render(str(defaults[key]), {'AccountInfo': account_info}) if i < len(parameter): if '=' in parameter[i]: seen_keyword = True else: if seen_keyword: raise click.UsageError("Positional parameters must not follow keywords.") paras[key] = parameter[i] if len(paras) < len(parameter): raise click.UsageError('Too many parameters given. 
Need only: "{}"'.format(' '.join(parameterlist))) # process keyword parameters separately, if any if seen_keyword: for param in parameter: if '=' in param: key, value = param.split('=', 1) # split only on first = if key not in paras: raise click.UsageError('Unrecognized keyword parameter: "{}"'.format(key)) if paras[key] is not None: raise click.UsageError('Parameter specified multiple times: "{}"'.format(key)) paras[key] = value # finally, make sure every parameter got a value assigned, using defaults if given for key, defval in defaults.items(): paras[key] = paras[key] or defval if paras[key] is None: raise click.UsageError('Missing parameter "{}". Need: "{}"'.format(key, ' '.join(parameterlist))) args = TemplateArguments(region=region, version=version, **paras) return args def get_region(region): if not region: config = configparser.ConfigParser() try: config.read(os.path.expanduser('~/.aws/config')) if 'default' in config: region = config['default']['region'] except: pass if not region: raise click.UsageError('Please specify the AWS region on the command line (--region) or in ~/.aws/config') cf = boto3.client('cloudformation', region) if not cf: raise click.UsageError('Invalid region "{}"'.format(region)) return region def check_credentials(region): iam = boto3.client('iam') return iam.list_account_aliases() def get_stack_refs(refs: list): ''' >>> get_stack_refs(['foobar-stack']) [StackReference(name='foobar-stack', version=None)] >>> get_stack_refs(['foobar-stack', '1']) [StackReference(name='foobar-stack', version='1')] >>> get_stack_refs(['foobar-stack', '1', 'other-stack']) [StackReference(name='foobar-stack', version='1'), StackReference(name='other-stack', version=None)] >>> get_stack_refs(['foobar-stack', 'v1', 'v2', 'v99', 'other-stack']) [StackReference(name='foobar-stack', version='v1'), StackReference(name='foobar-stack', version='v2'), \ StackReference(name='foobar-stack', version='v99'), StackReference(name='other-stack', version=None)] ''' refs = 
list(refs) refs.reverse() stack_refs = [] last_stack = None while refs: ref = refs.pop() if last_stack is not None and re.compile(r'v[0-9][a-zA-Z0-9-]*$').match(ref): stack_refs.append(StackReference(last_stack, ref)) else: try: with open(ref) as fd: data = yaml.safe_load(fd) ref = data['SenzaInfo']['StackName'] except Exception as e: if not STACK_NAME_PATTERN.match(ref): # we can be sure that ref is a file path, # as stack names cannot contain dots or slashes raise click.FileError(ref, str(e)) if refs: version = refs.pop() else: version = None stack_refs.append(StackReference(ref, version)) last_stack = ref return stack_refs def all_with_version(stack_refs: list): ''' >>> all_with_version([StackReference(name='foobar-stack', version='1'), \ StackReference(name='other-stack', version=None)]) False >>> all_with_version([StackReference(name='foobar-stack', version='1'), \ StackReference(name='other-stack', version='v23')]) True >>> all_with_version([StackReference(name='foobar-stack', version='1')]) True >>> all_with_version([StackReference(name='other-stack', version=None)]) False ''' for ref in stack_refs: if not ref.version: return False return True @cli.command('list') @region_option @output_option @watch_option @watchrefresh_option @click.option('--all', is_flag=True, help='Show all stacks, including deleted ones') @click.argument('stack_ref', nargs=-1) def list_stacks(region, stack_ref, all, output, w, watch): '''List Cloud Formation stacks''' region = get_region(region) check_credentials(region) stack_refs = get_stack_refs(stack_ref) for _ in watching(w, watch): rows = [] for stack in get_stacks(stack_refs, region, all=all): rows.append({'stack_name': stack.name, 'version': stack.version, 'status': stack.StackStatus, 'creation_time': calendar.timegm(stack.CreationTime.timetuple()), 'description': stack.TemplateDescription}) rows.sort(key=lambda x: (x['stack_name'], x['version'])) with OutputFormat(output): print_table('stack_name version status creation_time 
description'.split(), rows, styles=STYLES, titles=TITLES) @cli.command() @click.argument('definition', type=DEFINITION) @click.argument('version', callback=validate_version) @click.argument('parameter', nargs=-1) @region_option @click.option('--disable-rollback', is_flag=True, help='Disable Cloud Formation rollback on failure') @click.option('--dry-run', is_flag=True, help='No-op mode: show what would be created') @click.option('-f', '--force', is_flag=True, help='Ignore failing validation checks') def create(definition, region, version, parameter, disable_rollback, dry_run, force): '''Create a new Cloud Formation stack from the given Senza definition file''' data = create_cf_template(definition, region, version, parameter, force) cf = boto3.client('cloudformation', region) with Action('Creating Cloud Formation stack {}..'.format(data['StackName'])) as act: try: if dry_run: info('**DRY-RUN** {}'.format(data['NotificationARNs'])) else: cf.create_stack(DisableRollback=disable_rollback, **data) except ClientError as e: if e.response['Error']['Code'] == 'AlreadyExistsException': act.fatal_error('Stack {} already exists. 
Please choose another version.'.format(data['StackName'])) else: raise @cli.command() @click.argument('definition', type=DEFINITION) @click.argument('version', callback=validate_version) @click.argument('parameter', nargs=-1) @region_option @click.option('--disable-rollback', is_flag=True, help='Disable Cloud Formation rollback on failure') @click.option('--dry-run', is_flag=True, help='No-op mode: show what would be created') @click.option('-f', '--force', is_flag=True, help='Ignore failing validation checks') def update(definition, region, version, parameter, disable_rollback, dry_run, force): '''Update an existing Cloud Formation stack from the given Senza definition file''' data = create_cf_template(definition, region, version, parameter, force) cf = boto3.client('cloudformation', region) with Action('Updating Cloud Formation stack {}..'.format(data['StackName'])) as act: try: if dry_run: info('**DRY-RUN** {}'.format(data['NotificationARNs'])) else: del(data['Tags']) cf.update_stack(**data) except ClientError as e: act.fatal_error('ClientError: {}'.format(pformat(e.response))) @cli.command('print') @click.argument('definition', type=DEFINITION) @click.argument('version', callback=validate_version) @click.argument('parameter', nargs=-1) @region_option @json_output_option @click.option('-f', '--force', is_flag=True, help='Ignore failing validation checks') def print_cfjson(definition, region, version, parameter, output, force): '''Print the generated Cloud Formation template''' data = create_cf_template(definition, region, version, parameter, force) print_json(data['TemplateBody'], output) def create_cf_template(definition, region, version, parameter, force): region = get_region(region) check_credentials(region) account_info = AccountArguments(region=region) args = parse_args(definition, region, version, parameter, account_info) with Action('Generating Cloud Formation template..'): data = evaluate(definition.copy(), args, account_info, force) stack_name = 
"{0}-{1}".format(data['Mappings']['Senza']['Info']['StackName'], data['Mappings']['Senza']['Info']['StackVersion']) if len(stack_name) > 128: fatal_error('Error: Stack name "{}" cannot exceed 128 characters. '.format(stack_name) + 'Please choose another name/version.') parameters = [] for name, parameter in data.get("Parameters", {}).items(): parameters.append({'ParameterKey': name, 'ParameterValue': getattr(args, name, None)}) tags = {} senza_tags = data['Mappings']['Senza']['Info'].get('Tags') if isinstance(senza_tags, dict): tags.update(senza_tags) elif isinstance(senza_tags, list): for tag in senza_tags: for key, value in tag.items(): # # As the SenzaInfo is not evaluated, we explicitly evaluate the values here tags[key] = evaluate_template(value, info, [], args, account_info) tags.update({ "Name": stack_name, "StackName": data['Mappings']['Senza']['Info']['StackName'], "StackVersion": data['Mappings']['Senza']['Info']['StackVersion'] }) tags_list = [] tags_mapping_list = [] for k, v in tags.items(): tags_list.append({'Key': k, 'Value': v}) tags_mapping_list.append({k: v}) data['Mappings']['Senza']['Info']['Tags'] = tags_mapping_list if "OperatorTopicId" in data['Mappings']['Senza']['Info']: topic = data['Mappings']['Senza']['Info']["OperatorTopicId"] topic_arn = resolve_topic_arn(region, topic) if not topic_arn: fatal_error('Error: SNS topic "{}" does not exist'.format(topic)) topics = [topic_arn] else: topics = [] capabilities = get_required_capabilities(data) cfjson = json.dumps(data, sort_keys=True, indent=4) return {'StackName': stack_name, 'TemplateBody': cfjson, 'Parameters': parameters, 'Tags': tags_list, 'NotificationARNs': topics, 'Capabilities': capabilities} @cli.command() @click.argument('stack_ref', nargs=-1) @region_option @click.option('--dry-run', is_flag=True, help='No-op mode: show what would be deleted') @click.option('-f', '--force', is_flag=True, help='Allow deleting multiple stacks') def delete(stack_ref, region, dry_run, force): 
'''Delete a single Cloud Formation stack''' stack_refs = get_stack_refs(stack_ref) region = get_region(region) check_credentials(region) cf = boto3.client('cloudformation', region) if not stack_refs: raise click.UsageError('Please specify at least one stack') stacks = list(get_stacks(stack_refs, region)) if not all_with_version(stack_refs) and len(stacks) > 1 and not dry_run and not force: fatal_error('Error: {} matching stacks found. '.format(len(stacks)) + 'Please use the "--force" flag if you really want to delete multiple stacks.') for stack in stacks: with Action('Deleting Cloud Formation stack {}..'.format(stack.StackName)): if not dry_run: cf.delete_stack(StackName=stack.StackName) def format_resource_type(resource_type): if resource_type and resource_type.startswith('AWS::'): return resource_type[5:] return resource_type @cli.command() @click.argument('stack_ref', nargs=-1) @region_option @watch_option @watchrefresh_option @output_option def resources(stack_ref, region, w, watch, output): '''Show all resources of a single Cloud Formation stack''' stack_refs = get_stack_refs(stack_ref) region = get_region(region) check_credentials(region) cf = boto3.client('cloudformation', region) for _ in watching(w, watch): rows = [] for stack in get_stacks(stack_refs, region): resources = cf.describe_stack_resources(StackName=stack.StackName)['StackResources'] for resource in resources: d = resource.copy() d['stack_name'] = stack.name d['version'] = stack.version d['resource_type'] = format_resource_type(d['ResourceType']) d['creation_time'] = calendar.timegm(resource['Timestamp'].timetuple()) rows.append(d) rows.sort(key=lambda x: (x['stack_name'], x['version'], x['LogicalResourceId'])) with OutputFormat(output): print_table('stack_name version LogicalResourceId resource_type ResourceStatus creation_time'.split(), rows, styles=STYLES, titles=TITLES) @cli.command() @click.argument('stack_ref', nargs=-1) @region_option @watch_option @watchrefresh_option @output_option def 
events(stack_ref, region, w, watch, output): '''Show all Cloud Formation events for a single stack''' stack_refs = get_stack_refs(stack_ref) region = get_region(region) check_credentials(region) cf = boto3.client('cloudformation', region) for _ in watching(w, watch): rows = [] for stack in get_stacks(stack_refs, region): events = cf.describe_stack_events(StackName=stack.StackId)['StackEvents'] for event in events: d = event.copy() d['stack_name'] = stack.name d['version'] = stack.version d['resource_type'] = format_resource_type(d['ResourceType']) d['event_time'] = calendar.timegm(event['Timestamp'].timetuple()) rows.append(d) rows.sort(key=lambda x: x['event_time']) with OutputFormat(output): print_table(('stack_name version resource_type LogicalResourceId ' + 'ResourceStatus ResourceStatusReason event_time').split(), rows, styles=STYLES, titles=TITLES, max_column_widths=MAX_COLUMN_WIDTHS) def get_template_description(template: str): module = importlib.import_module('senza.templates.{}'.format(template)) return '{}: {}'.format(template, module.__doc__.strip()) @cli.command() @click.argument('definition_file', type=click.File('w')) @region_option @click.option('-t', '--template', help='Use a custom template', metavar='TEMPLATE_ID') @click.option('-v', '--user-variable', help='Provide user variables for the template', metavar='KEY=VAL', multiple=True, type=KEY_VAL) def init(definition_file, region, template, user_variable): '''Initialize a new Senza definition''' region = get_region(region) check_credentials(region) account_info = AccountArguments(region=region) templates = [] for mod in os.listdir(os.path.join(os.path.dirname(__file__), 'templates')): if not mod.startswith('_'): templates.append(mod.split('.')[0]) while template not in templates: template = choice('Please select the project template', [(t, get_template_description(t)) for t in sorted(templates)], default='webapp') module = importlib.import_module('senza.templates.{}'.format(template)) variables = 
{} for key_val in user_variable: key, val = key_val variables[key] = val variables = module.gather_user_variables(variables, region, account_info) with Action('Generating Senza definition file {}..'.format(definition_file.name)): definition = module.generate_definition(variables) definition_file.write(definition) def get_instance_health(elb, stack_name: str) -> dict: if stack_name is None: return {} instance_health = {} try: instance_states = elb.describe_instance_health(LoadBalancerName=stack_name)['InstanceStates'] for istate in instance_states: instance_health[istate['InstanceId']] = camel_case_to_underscore(istate['State']).upper() except ClientError as e: # ignore non existing ELBs # ignore ValidationError "LoadBalancer name cannot be longer than 32 characters" # ignore rate limit exceeded errors if e.response['Error']['Code'] not in ('LoadBalancerNotFound', 'ValidationError', 'Throttling'): raise return instance_health def get_instance_user_data(instance) -> dict: try: attrs = instance.describe_attribute(Attribute='userData') data_b64 = attrs['UserData']['Value'] data_yaml = base64.b64decode(data_b64) data_dict = yaml.load(data_yaml) return data_dict except Exception as e: # there's just too many ways this can fail, catch 'em all sys.stderr.write('Failed to query instance user data: {}\n'.format(e)) return {} def get_instance_docker_image_source(instance) -> str: return get_instance_user_data(instance).get('source', '') @cli.command() @click.argument('stack_ref', nargs=-1) @click.option('--all', is_flag=True, help='Show all instances, including instances not part of any stack') @click.option('--terminated', is_flag=True, help='Show instances in TERMINATED state') @click.option('-d', '--docker-image', is_flag=True, help='Show docker image source for every instance listed') @click.option('-p', '--piu', metavar='REASON', help='execute PIU request-access command') @click.option('-O', '--odd-host', help='Odd SSH bastion hostname', envvar='ODD_HOST', 
metavar='HOSTNAME') @region_option @output_option @watch_option @watchrefresh_option def instances(stack_ref, all, terminated, docker_image, piu, odd_host, region, output, w, watch): '''List the stack's EC2 instances''' stack_refs = get_stack_refs(stack_ref) region = get_region(region) check_credentials(region) ec2 = boto3.resource('ec2', region) elb = boto3.client('elb', region) if all: filters = [] else: # filter out instances not part of any stack filters = [{'Name': 'tag-key', 'Values': ['aws:cloudformation:stack-name']}] opt_docker_column = ' docker_source' if docker_image else '' for _ in watching(w, watch): rows = [] for instance in ec2.instances.filter(Filters=filters): cf_stack_name = get_tag(instance.tags, 'aws:cloudformation:stack-name') stack_name = get_tag(instance.tags, 'StackName') stack_version = get_tag(instance.tags, 'StackVersion') if not stack_refs or matches_any(cf_stack_name, stack_refs): instance_health = get_instance_health(elb, cf_stack_name) if instance.state['Name'].upper() != 'TERMINATED' or terminated: docker_source = get_instance_docker_image_source(instance) if docker_image else '' rows.append({'stack_name': stack_name or '', 'version': stack_version or '', 'resource_id': get_tag(instance.tags, 'aws:cloudformation:logical-id'), 'instance_id': instance.id, 'public_ip': instance.public_ip_address, 'private_ip': instance.private_ip_address, 'state': instance.state['Name'].upper().replace('-', '_'), 'lb_status': instance_health.get(instance.id), 'docker_source': docker_source, 'launch_time': instance.launch_time.timestamp()}) rows.sort(key=lambda r: (r['stack_name'], r['version'], r['instance_id'])) with OutputFormat(output): print_table(('stack_name version resource_id instance_id public_ip ' + 'private_ip state lb_status{} launch_time'.format(opt_docker_column)).split(), rows, styles=STYLES, titles=TITLES) if piu is not None: for row in rows: if row['private_ip'] is not None: call(['piu', 'request-access', row['private_ip'], '{} via 
senza'.format(piu), '-O', odd_host]) @cli.command() @click.argument('stack_ref', nargs=-1) @region_option @output_option @watch_option @watchrefresh_option def status(stack_ref, region, output, w, watch): '''Show stack status information''' stack_refs = get_stack_refs(stack_ref) region = get_region(region) check_credentials(region) ec2 = boto3.resource('ec2', region) elb = boto3.client('elb', region) cf = boto3.resource('cloudformation', region) for _ in watching(w, watch): rows = [] for stack in sorted(get_stacks(stack_refs, region)): instance_health = get_instance_health(elb, stack.StackName) main_dns_resolves = False http_status = None for res in cf.Stack(stack.StackId).resource_summaries.all(): if res.resource_type == 'AWS::Route53::RecordSet': name = res.physical_resource_id if not name: # physical resource ID will be empty during stack creation continue if 'version' in res.logical_id.lower(): try: requests.get('https://{}/'.format(name), timeout=2) http_status = 'OK' except: http_status = 'ERROR' else: try: answers = dns.resolver.query(name, 'CNAME') except: answers = [] for answer in answers: if answer.target.to_text().startswith('{}-'.format(stack.StackName)): main_dns_resolves = True instances = list(ec2.instances.filter(Filters=[{'Name': 'tag:aws:cloudformation:stack-id', 'Values': [stack.StackId]}])) rows.append({'stack_name': stack.name, 'version': stack.version, 'status': stack.StackStatus, 'total_instances': len(instances), 'running_instances': len([i for i in instances if i.state['Name'] == 'running']), 'healthy_instances': len([i for i in instance_health.values() if i == 'IN_SERVICE']), 'lb_status': ','.join(set(instance_health.values())), 'main_dns': main_dns_resolves, 'http_status': http_status }) with OutputFormat(output): print_table(('stack_name version status total_instances running_instances healthy_instances ' + 'lb_status http_status main_dns').split(), rows, styles=STYLES, titles=TITLES) @cli.command() @click.argument('stack_ref', 
nargs=-1) @region_option @output_option @watch_option @watchrefresh_option def domains(stack_ref, region, output, w, watch): '''List the stack's Route53 domains''' stack_refs = get_stack_refs(stack_ref) region = get_region(region) check_credentials(region) cf = boto3.resource('cloudformation', region) records_by_name = {} for _ in watching(w, watch): rows = [] for stack in get_stacks(stack_refs, region): if stack.StackStatus == 'ROLLBACK_COMPLETE': # performance optimization: do not call EC2 API for "dead" stacks continue for res in cf.Stack(stack.StackId).resource_summaries.all(): if res.resource_type == 'AWS::Route53::RecordSet': name = res.physical_resource_id if name not in records_by_name: zone_name = name.split('.', 1)[1] for rec in get_records(zone_name): records_by_name[(rec['Name'].rstrip('.'), rec.get('SetIdentifier'))] = rec record = records_by_name.get((name, stack.StackName)) or records_by_name.get((name, None)) row = {'stack_name': stack.name, 'version': stack.version, 'resource_id': res.logical_id, 'domain': res.physical_resource_id, 'weight': None, 'type': None, 'value': None, 'create_time': calendar.timegm(res.last_updated_timestamp.timetuple())} if record: row.update({'weight': str(record.get('Weight', '')), 'type': record.get('Type'), 'value': ','.join([r['Value'] for r in record.get('ResourceRecords')])}) rows.append(row) with OutputFormat(output): print_table('stack_name version resource_id domain weight type value create_time'.split(), rows, styles=STYLES, titles=TITLES) @cli.command() @click.argument('stack_name') @click.argument('stack_version', required=False) @click.argument('percentage', type=FloatRange(0, 100, clamp=True), required=False) @region_option @output_option def traffic(stack_name, stack_version, percentage, region, output): '''Route traffic to a specific stack (weighted DNS record)''' stack_refs = get_stack_refs([stack_name, stack_version]) region = get_region(region) check_credentials(region) with OutputFormat(output): for 
ref in stack_refs: if percentage is None: print_version_traffic(ref, region) else: change_version_traffic(ref, percentage, region) @cli.command() @click.argument('stack_ref', nargs=-1) @click.option('--hide-older-than', help='Hide images older than X days (default: 21)', type=int, default=21, metavar='DAYS') @click.option('--show-instances', is_flag=True, help='Show EC2 instance IDs') @region_option @output_option def images(stack_ref, region, output, hide_older_than, show_instances): '''Show all used AMIs and available Taupage AMIs''' stack_refs = get_stack_refs(stack_ref) region = get_region(region) check_credentials(region) ec2 = boto3.resource('ec2', region) instances_by_image = collections.defaultdict(list) for inst in ec2.instances.all(): if inst.state['Name'] == 'terminated': # do not count TERMINATED EC2 instances continue stack_name = get_tag(inst.tags, 'aws:cloudformation:stack-name') if not stack_refs or matches_any(stack_name, stack_refs): instances_by_image[inst.image_id].append(inst) images = {} for image in ec2.images.filter(ImageIds=list(instances_by_image.keys())): images[image.id] = image if not stack_refs: filters = [{'Name': 'name', 'Values': ['*Taupage-*']}, {'Name': 'state', 'Values': ['available']}] for image in ec2.images.filter(Filters=filters): images[image.id] = image rows = [] cutoff = datetime.datetime.now() - datetime.timedelta(days=hide_older_than) for image in images.values(): row = image.meta.data.copy() creation_time = parse_time(image.creation_date) row['creation_time'] = creation_time row['instances'] = ', '.join(sorted(i.id for i in instances_by_image[image.id])) row['total_instances'] = len(instances_by_image[image.id]) stacks = set() for instance in instances_by_image[image.id]: stack_name = get_tag(instance.tags, 'aws:cloudformation:stack-name') # EC2 instance might not be part of a CF stack if stack_name: stacks.add(stack_name) row['stacks'] = ', '.join(sorted(stacks)) # if creation_time > cutoff.timestamp() or 
row['total_instances']: rows.append(row) rows.sort(key=lambda x: x.get('Name')) with OutputFormat(output): cols = 'ImageId Name OwnerId Description stacks total_instances creation_time' if show_instances: cols = cols.replace('total_instances', 'instances') print_table(cols.split(), rows, titles=TITLES, max_column_widths=MAX_COLUMN_WIDTHS) def is_ip_address(x: str): ''' >>> is_ip_address(None) False >>> is_ip_address('127.0.0.1') True ''' try: ipaddress.ip_address(x) return True except: return False def get_console_line_style(line: str): ''' >>> get_console_line_style('foo') {} >>> get_console_line_style('ERROR:')['fg'] 'red' >>> get_console_line_style('WARNING:')['fg'] 'yellow' >>> get_console_line_style('SUCCESS:')['fg'] 'green' >>> get_console_line_style('INFO:')['bold'] True ''' if 'ERROR:' in line: return {'fg': 'red', 'bold': True} elif 'WARNING:' in line: return {'fg': 'yellow', 'bold': True} elif 'SUCCESS:' in line: return {'fg': 'green', 'bold': True} elif 'INFO:' in line: return {'bold': True} else: return {} def print_console(line: str): style = get_console_line_style(line) click.secho(line, **style) @cli.command() @click.argument('instance_or_stack_ref', nargs=-1) @click.option('-l', '--limit', help='Show last N lines of console output (default: 25)', type=int, default=25, metavar='N') @region_option @watch_option @watchrefresh_option def console(instance_or_stack_ref, limit, region, w, watch): '''Print EC2 instance console output. 
INSTANCE_OR_STACK_REF can be an instance ID, private IP address or stack name/version.''' if instance_or_stack_ref and all(x.startswith('i-') for x in instance_or_stack_ref): stack_refs = None filters = [{'Name': 'instance-id', 'Values': list(instance_or_stack_ref)}] elif instance_or_stack_ref and all(is_ip_address(x) for x in instance_or_stack_ref): stack_refs = None filters = [{'Name': 'private-ip-address', 'Values': list(instance_or_stack_ref)}] else: stack_refs = get_stack_refs(instance_or_stack_ref) # filter out instances not part of any stack filters = [{'Name': 'tag-key', 'Values': ['aws:cloudformation:stack-name']}] region = get_region(region) check_credentials(region) ec2 = boto3.resource('ec2', region) for _ in watching(w, watch): for instance in ec2.instances.filter(Filters=filters): cf_stack_name = get_tag(instance.tags, 'aws:cloudformation:stack-name') if not stack_refs or matches_any(cf_stack_name, stack_refs): output = {} try: output = instance.console_output() except: pass click.secho('Showing last {} lines of {}/{}..'.format(limit, cf_stack_name, instance.private_ip_address or instance.id), bold=True) if isinstance(output, dict) and output.get('Output'): for line in output['Output'].split('\n')[-limit:]: print_console(line) @cli.command() @click.argument('stack_ref', nargs=-1) @region_option @json_output_option def dump(stack_ref, region, output): '''Dump Cloud Formation template of existing stack''' stack_refs = get_stack_refs(stack_ref) region = get_region(region) check_credentials(region) cf = boto3.client('cloudformation', region) for stack in get_stacks(stack_refs, region): data = cf.get_template(StackName=stack.StackName)['TemplateBody'] cfjson = json.dumps(data, sort_keys=True, indent=4) print_json(cfjson, output) def main(): handle_exceptions(cli)() if __name__ == "__main__": main() PKxr[G]>>senza/traffic.pyfrom json import JSONEncoder import click from clickclick import warning, action, ok, print_table, Action import collections from .aws 
import get_stacks, StackReference, get_tag import boto3 PERCENT_RESOLUTION = 2 FULL_PERCENTAGE = PERCENT_RESOLUTION * 100 DNS_RR_CACHE = {} DNS_ZONE_CACHE = {} def get_weights(dns_names: list, identifier: str, all_identifiers) -> ({str: int}, int, int): """ For the given dns_name, get the dns record weights from provided dns record set followed by partial count and partial weight sum. Here partial means without the element that we are operating now on. """ partial_count = 0 partial_sum = 0 known_record_weights = {} for dns_name in dns_names: for r in get_records(dns_name.split('.', 1)[1]): if r['Type'] == 'CNAME' and r['Name'] == dns_name: if r['Weight']: w = int(r['Weight']) else: w = 0 known_record_weights[r['SetIdentifier']] = w if r['SetIdentifier'] != identifier and w > 0: # we should ignore all versions that do not get any traffic # not to put traffic on the disabled versions when redistributing traffic weights partial_sum += w partial_count += 1 if identifier not in known_record_weights: known_record_weights[identifier] = 0 for ident in all_identifiers: if ident not in known_record_weights: known_record_weights[ident] = 0 return known_record_weights, partial_count, partial_sum def calculate_new_weights(delta, identifier, known_record_weights, percentage): new_record_weights = {} deltas = {} for i, w in known_record_weights.items(): if i == identifier: n = percentage else: if percentage == FULL_PERCENTAGE: # other versions should be disabled if 100% of traffic is ordered for our version n = 0 else: if w > 0: # if old weight is not zero # do not allow it to be pushed below 1 n = int(max(1, w + delta)) else: # do not touch versions that had not been getting traffic before n = 0 new_record_weights[i] = n deltas[i] = n - known_record_weights[i] return new_record_weights, deltas def compensate(calculation_error, compensations, identifier, new_record_weights, partial_count, percentage, identifier_versions): """ Compensate for the rounding errors as well as for the 
fact, that we do not allow to bring down the minimal weights lower then minimal possible value not to disable traffic from the minimally configured versions (1) and we do not allow to add any values to the already disabled versions (0). """ # distribute the error on the versions, other then the current one assert partial_count part = calculation_error / partial_count if part > 0: part = int(max(1, part)) else: part = int(min(-1, part)) # avoid changing the older version distributions for i in sorted(new_record_weights.keys(), key=lambda x: identifier_versions[x], reverse=True): if i == identifier: continue nw = new_record_weights[i] + part if nw <= 0: # do not remove the traffic from the minimal traffic versions continue new_record_weights[i] = nw calculation_error -= part compensations[i] = part if calculation_error == 0: break if calculation_error != 0: adjusted_percentage = percentage + calculation_error compensations[identifier] = calculation_error calculation_error = 0 warning( ("Changing given percentage from {} to {} " + "because all other versions are already getting the possible minimum traffic").format( percentage / PERCENT_RESOLUTION, adjusted_percentage / PERCENT_RESOLUTION)) percentage = adjusted_percentage new_record_weights[identifier] = percentage assert calculation_error == 0 return percentage def set_new_weights(dns_names: list, identifier, lb_dns_name: str, new_record_weights, percentage): action('Setting weights for {dns_names}..', **vars()) dns_changes = {} for idx, dns_name in enumerate(dns_names): domain = dns_name.split('.', 1)[1] zone = get_zone(domain) did_the_upsert = False for r in get_records(domain): if r['Type'] == 'CNAME' and r['Name'] == dns_name: w = new_record_weights[r['SetIdentifier']] if w: if int(r['Weight']) != w: r['Weight'] = w if dns_changes.get(zone['Id']) is None: dns_changes[zone['Id']] = [] dns_changes[zone['Id']].append({'Action': 'UPSERT', 'ResourceRecordSet': r}) if identifier == r['SetIdentifier']: did_the_upsert = 
True else: if dns_changes.get(zone['Id']) is None: dns_changes[zone['Id']] = [] dns_changes[zone['Id']].append({'Action': 'DELETE', 'ResourceRecordSet': r.copy()}) if new_record_weights[identifier] > 0 and not did_the_upsert: if dns_changes.get(zone['Id']) is None: dns_changes[zone['Id']] = [] dns_changes[zone['Id']].append({'Action': 'UPSERT', 'ResourceRecordSet': {'Name': dns_name, 'Type': 'CNAME', 'SetIdentifier': identifier, 'Weight': new_record_weights[identifier], 'TTL': 20, 'ResourceRecords': [{'Value': lb_dns_name[idx]}]}}) if dns_changes: route53 = boto3.client('route53') for hosted_zone_id, change in dns_changes.items(): route53.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch={'Comment': 'Weight change of {}'.format(hosted_zone_id), 'Changes': change}) if sum(new_record_weights.values()) == 0: ok(' DISABLED') else: ok() else: ok(' not changed') def dump_traffic_changes(stack_name: str, identifier: str, identifier_versions: {str: str}, known_record_weights: {str: int}, new_record_weights: {str: int}, compensations: {str: int}, deltas: {str: int} ): """ dump changes to the traffic settings for the given versions """ rows = [ { 'stack_name': stack_name, 'version': identifier_versions.get(i), 'identifier': i, 'old_weight%': known_record_weights.get(i), # 'delta': (delta if new_record_weights[i] else 0 if i != identifier else forced_delta), 'delta': deltas[i], 'compensation': compensations.get(i), 'new_weight%': new_record_weights.get(i), } for i in known_record_weights.keys() ] full_switch = max(new_record_weights.values()) == FULL_PERCENTAGE for r in rows: d = r['delta'] c = r['compensation'] if full_switch and not d and c: d = -c r['delta'] = (d / PERCENT_RESOLUTION) if d else None r['old_weight%'] /= PERCENT_RESOLUTION r['new_weight%'] /= PERCENT_RESOLUTION r['compensation'] = (c / PERCENT_RESOLUTION) if c else None if identifier == r['identifier']: r['current'] = '<' return sorted(rows, key=lambda x: 
identifier_versions.get(x['identifier'], '')) def print_traffic_changes(message: list): print_table('stack_name version identifier old_weight% delta compensation new_weight% current'.split(), message) class StackVersion(collections.namedtuple('StackVersion', 'name version domain lb_dns_name notification_arns')): @property def identifier(self): return '{}-{}'.format(self.name, self.version) @property def dns_name(self): return ['{}.'.format(x) for x in self.domain] def get_stack_versions(stack_name: str, region: str): cf = boto3.resource('cloudformation', region) for stack in get_stacks([StackReference(name=stack_name, version=None)], region): if stack.StackStatus in ('ROLLBACK_COMPLETE', 'CREATE_FAILED'): continue details = cf.Stack(stack.StackId) lb_dns_name = [] domain = [] notification_arns = details.notification_arns for res in details.resource_summaries.all(): if res.resource_type == 'AWS::ElasticLoadBalancing::LoadBalancer': elb = boto3.client('elb', region) lbs = elb.describe_load_balancers(LoadBalancerNames=[res.physical_resource_id]) lb_dns_name.append(lbs['LoadBalancerDescriptions'][0]['DNSName']) elif res.resource_type == 'AWS::Route53::RecordSet': if 'version' not in res.logical_id.lower(): domain.append(res.physical_resource_id) yield StackVersion(stack_name, get_tag(details.tags, 'StackVersion'), domain, lb_dns_name, notification_arns) def get_version(versions: list, version: str): for ver in versions: if ver.version == version: return ver raise click.UsageError('Stack version {} not found'.format(version)) def get_zone(domain: str): domain = '{}.'.format(domain.rstrip('.')) if DNS_ZONE_CACHE.get(domain) is None: route53 = boto3.client('route53') zone = list(filter(lambda x: x['Name'] == domain, route53.list_hosted_zones_by_name(DNSName=domain)['HostedZones']) ) if not zone: raise ValueError('Zone {} not found'.format(domain)) DNS_ZONE_CACHE[domain] = zone[0] return DNS_ZONE_CACHE[domain] def get_records(domain: str): domain = 
'{}.'.format(domain.rstrip('.')) if DNS_RR_CACHE.get(domain) is None: zone = get_zone(domain) route53 = boto3.client('route53') result = route53.list_resource_record_sets(HostedZoneId=zone['Id']) records = result['ResourceRecordSets'] while result['IsTruncated']: recordfilter = {'HostedZoneId': zone['Id'], 'StartRecordName': result['NextRecordName'], 'StartRecordType': result['NextRecordType'] } if result.get('NextRecordIdentifier'): recordfilter['StartRecordIdentifier'] = result.get('NextRecordIdentifier') result = route53.list_resource_record_sets(**recordfilter) records.extend(result['ResourceRecordSets']) DNS_RR_CACHE[domain] = records return DNS_RR_CACHE[domain] def print_version_traffic(stack_ref: StackReference, region): versions = list(get_stack_versions(stack_ref.name, region)) identifier_versions = collections.OrderedDict( (version.identifier, version.version) for version in versions) if stack_ref.version: version = get_version(versions, stack_ref.version) elif versions: version = versions[0] else: raise click.UsageError('No stack version of "{}" found'.format(stack_ref.name)) if not version.domain: raise click.UsageError('Stack {} version {} has no domain'.format(version.name, version.version)) known_record_weights, partial_count, partial_sum = get_weights(version.dns_name, version.identifier, identifier_versions.keys()) rows = [ { 'stack_name': version.name, 'version': identifier_versions.get(i), 'identifier': i, 'weight%': known_record_weights[i], } for i in known_record_weights.keys() ] for r in rows: r['weight%'] /= PERCENT_RESOLUTION if version.identifier == r['identifier']: r['current'] = '<' cols = 'stack_name version identifier weight%'.split() if stack_ref.version: cols.append('current') print_table(cols, sorted(rows, key=lambda x: identifier_versions.get(x['identifier'], ''))) def change_version_traffic(stack_ref: StackReference, percentage: float, region): versions = list(get_stack_versions(stack_ref.name, region)) arns = [] for v in versions: 
arns = arns + v.notification_arns identifier_versions = collections.OrderedDict( (version.identifier, version.version) for version in versions) version = get_version(versions, stack_ref.version) identifier = version.identifier if not version.domain: raise click.UsageError('Stack {} version {} has no domain'.format(version.name, version.version)) percentage = int(percentage * PERCENT_RESOLUTION) known_record_weights, partial_count, partial_sum = get_weights(version.dns_name, identifier, identifier_versions.keys()) if partial_count == 0 and percentage == 0: # disable the last remaining version new_record_weights = {i: 0 for i in known_record_weights.keys()} message = 'DNS record "{dns_name}" will be removed from that stack'.format(dns_name=version.dns_name) ok(msg=message) else: with Action('Calculating new weights..'): compensations = {} if partial_count: delta = int((FULL_PERCENTAGE - percentage - partial_sum) / partial_count) else: delta = 0 if percentage > 0: # will put the only last version to full traffic percentage compensations[identifier] = FULL_PERCENTAGE - percentage percentage = int(FULL_PERCENTAGE) new_record_weights, deltas = calculate_new_weights(delta, identifier, known_record_weights, percentage) total_weight = sum(new_record_weights.values()) calculation_error = FULL_PERCENTAGE - total_weight if calculation_error and calculation_error < FULL_PERCENTAGE: percentage = compensate(calculation_error, compensations, identifier, new_record_weights, partial_count, percentage, identifier_versions) assert sum(new_record_weights.values()) == FULL_PERCENTAGE message = dump_traffic_changes(stack_ref.name, identifier, identifier_versions, known_record_weights, new_record_weights, compensations, deltas) print_traffic_changes(message) inform_sns(arns, message, region) set_new_weights(version.dns_name, identifier, version.lb_dns_name, new_record_weights, percentage) def inform_sns(arns: list, message: str, region): jsonizer = JSONEncoder() sns_topics = set(arns) sns 
= boto3.client('sns') for sns_topic in sns_topics: sns.publish(TopicArn=sns_topic, Subject="SenzaTrafficRedirect", Message=jsonizer.encode((message))) PKbF9DBBsenza/__main__.pyimport senza.cli if __name__ == '__main__': senza.cli.main() PKNGVd senza/aws.pyimport collections import datetime import functools import time import boto3 from botocore.exceptions import ClientError def get_security_group(region: str, sg_name: str): ec2 = boto3.resource('ec2', region) try: return list(ec2.security_groups.filter(GroupNames=[sg_name]))[0] except ClientError as e: if e.response['Error']['Code'] == 'InvalidGroup.NotFound': return None elif e.response['Error']['Code'] == 'VPCIdNotSpecified': # no Default VPC, we must use the lng way... for sg in ec2.security_groups.all(): # FIXME: What if we have 2 VPC, with a SG with the same name?! if sg.group_name == sg_name: return sg return None else: raise def resolve_security_groups(security_groups: list, region: str): result = [] for security_group in security_groups: if isinstance(security_group, dict): result.append(security_group) elif security_group.startswith('sg-'): result.append(security_group) else: sg = get_security_group(region, security_group) if not sg: raise ValueError('Security Group "{}" does not exist'.format(security_group)) result.append(sg.id) return result def find_ssl_certificate_arn(region, pattern): '''Find the a matching SSL cert and return its ARN''' iam = boto3.resource('iam') candidates = set() certs = list(iam.server_certificates.all()) for cert in certs: # only consider matching SSL certs or use the only one available if pattern == cert.name or len(certs) == 1: candidates.add(cert.server_certificate_metadata['Arn']) if candidates: # return first match (alphabetically sorted return sorted(candidates)[0] else: return None def parse_time(s: str) -> float: ''' >>> parse_time('2015-04-14T19:09:01.000Z') > 0 True ''' try: utc = datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%fZ') ts = time.time() utc_offset = 
datetime.datetime.fromtimestamp(ts) - datetime.datetime.utcfromtimestamp(ts) local = utc + utc_offset return local.timestamp() except: return None def get_required_capabilities(data: dict): '''Get capabilities for a given cloud formation template for the "create_stack" call >>> get_required_capabilities({}) [] >>> get_required_capabilities({'Resources': {'MyRole': {'Type': 'AWS::IAM::Role', 'a': 'b'}}}) ['CAPABILITY_IAM'] ''' capabilities = [] for logical_id, config in data.get('Resources', {}).items(): if config.get('Type').startswith('AWS::IAM'): capabilities.append('CAPABILITY_IAM') return capabilities def resolve_topic_arn(region, topic_name): ''' >>> resolve_topic_arn(None, 'arn:123') 'arn:123' ''' topic_arn = None if topic_name.startswith('arn:'): topic_arn = topic_name else: # resolve topic name to ARN sns = boto3.resource('sns', region) for topic in sns.topics.all(): if topic.arn.endswith(':{}'.format(topic_name)): topic_arn = topic.arn return topic_arn @functools.total_ordering class SenzaStackSummary: def __init__(self, stack): self.stack = stack parts = stack['StackName'].rsplit('-', 1) self.name = parts[0] if len(parts) > 1: self.version = parts[1] else: self.version = '' def __getattr__(self, item): if item in self.__dict__: return self.__dict__[item] return self.stack.get(item) def __lt__(self, other): def key(v): return (v.name, v.version) return key(self) < key(other) def __eq__(self, other): return self.stack['StackName'] == other.stack['StackName'] def get_stacks(stack_refs: list, region, all=False): # boto3.resource('cf')-stacks.filter() doesn't support status_filter, only StackName cf = boto3.client('cloudformation', region) if all: status_filter = [] else: # status_filter = [st for st in cf.valid_states if st != 'DELETE_COMPLETE'] status_filter = [ "CREATE_IN_PROGRESS", "CREATE_FAILED", "CREATE_COMPLETE", "ROLLBACK_IN_PROGRESS", "ROLLBACK_FAILED", "ROLLBACK_COMPLETE", "DELETE_IN_PROGRESS", "DELETE_FAILED", # "DELETE_COMPLETE", 
"UPDATE_IN_PROGRESS", "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", "UPDATE_COMPLETE", "UPDATE_ROLLBACK_IN_PROGRESS", "UPDATE_ROLLBACK_FAILED", "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", "UPDATE_ROLLBACK_COMPLETE" ] for stack in cf.list_stacks(StackStatusFilter=status_filter)['StackSummaries']: if not stack_refs or matches_any(stack['StackName'], stack_refs): yield SenzaStackSummary(stack) def matches_any(cf_stack_name: str, stack_refs: list): ''' >>> matches_any(None, [StackReference(name='foobar', version=None)]) False >>> matches_any('foobar-1', []) False >>> matches_any('foobar-1', [StackReference(name='foobar', version=None)]) True >>> matches_any('foobar-1', [StackReference(name='foobar', version='1')]) True >>> matches_any('foobar-1', [StackReference(name='foobar', version='2')]) False ''' for ref in stack_refs: if ref.version and cf_stack_name == ref.cf_stack_name(): return True elif not ref.version and (cf_stack_name or '').rsplit('-', 1)[0] == ref.name: return True return False def get_tag(tags: list, key: str, default=None): ''' >>> tags = [{'Key': 'aws:cloudformation:stack-id', ... 'Value': 'arn:aws:cloudformation:eu-west-1:123:stack/test-123'}, ... {'Key': 'Name', ... 'Value': 'test-123'}, ... {'Key': 'StackVersion', ... 
'Value': '123'}] >>> get_tag(tags, 'StackVersion') '123' >>> get_tag(tags, 'aws:cloudformation:stack-id') 'arn:aws:cloudformation:eu-west-1:123:stack/test-123' >>> get_tag(tags, 'notfound') is None True ''' if isinstance(tags, list): found = [tag['Value'] for tag in tags if tag['Key'] == key] if len(found): return found[0] return default def get_account_id(): conn = boto3.client('iam') try: own_user = conn.get_user()['User'] except: own_user = None if not own_user: roles = conn.list_roles()['Roles'] if not roles: users = conn.list_users()['Users'] if not users: saml = conn.list_saml_providers()['SAMLProviderList'] if not saml: return None else: arn = [s['Arn'] for s in saml][0] else: arn = [u['Arn'] for u in users][0] else: arn = [r['Arn'] for r in roles][0] else: arn = own_user['Arn'] account_id = arn.split(':')[4] return account_id def get_account_alias(): conn = boto3.client('iam') return conn.list_account_aliases()['AccountAliases'][0] class StackReference(collections.namedtuple('StackReference', 'name version')): def cf_stack_name(self): return '{}-{}'.format(self.name, self.version) PK~[G9senza/__init__.py__version__ = '0.89' PK^LG.senza/components/taupage_auto_scaling_group.py import click import pierone.api import textwrap import yaml import json import sys import re from senza.components.auto_scaling_group import component_auto_scaling_group from senza.docker import docker_image_exists from senza.utils import ensure_keys _AWS_FN_RE = re.compile(r"('[{]{2} (.*?) [}]{2}')", re.DOTALL) def check_docker_image_exists(docker_image: pierone.api.DockerImage): if 'pierone' in docker_image.registry: try: exists = pierone.api.image_exists('pierone', docker_image) except pierone.api.Unauthorized: msg = textwrap.dedent(''' Unauthorized: Cannot check whether Docker image "{}" exists in Pier One Docker registry. Please generate a "pierone" OAuth access token using "pierone login". Alternatively you can skip this check using the "--force" option. 
'''.format(docker_image)).strip() raise click.UsageError(msg) else: exists = docker_image_exists(str(docker_image)) if not exists: raise click.UsageError('Docker image "{}" does not exist'.format(docker_image)) def component_taupage_auto_scaling_group(definition, configuration, args, info, force, account_info): # inherit from the normal auto scaling group but discourage user info and replace with a Taupage config if 'Image' not in configuration: configuration['Image'] = 'LatestTaupageImage' definition = component_auto_scaling_group(definition, configuration, args, info, force, account_info) taupage_config = configuration['TaupageConfig'] if 'notify_cfn' not in taupage_config: taupage_config['notify_cfn'] = {'stack': '{}-{}'.format(info["StackName"], info["StackVersion"]), 'resource': configuration['Name']} if 'application_id' not in taupage_config: taupage_config['application_id'] = info['StackName'] if 'application_version' not in taupage_config: taupage_config['application_version'] = info['StackVersion'] runtime = taupage_config.get('runtime') if runtime != 'Docker': raise click.UsageError('Taupage only supports the "Docker" runtime currently') source = taupage_config.get('source') if not source: raise click.UsageError('The "source" property of TaupageConfig must be specified') docker_image = pierone.api.DockerImage.parse(source) if not force and docker_image.registry: check_docker_image_exists(docker_image) userdata = generate_user_data(taupage_config) config_name = configuration["Name"] + "Config" ensure_keys(definition, "Resources", config_name, "Properties", "UserData") definition["Resources"][config_name]["Properties"]["UserData"]["Fn::Base64"] = userdata return definition def generate_user_data(taupage_config): """ Generates the CloudFormation "UserData" field. 
It looks for AWS functions such as Fn:: and Ref and generates the appropriate UserData json field, It leaves nodes representing AWS functions or refs unmodified and converts into text everything else. Example:: environment: S3_BUCKET: {"Ref": "ExhibitorBucket"} S3_PREFIX: exhibitor transforms into:: {"Fn::Join": ["", "environment:\n S3_BUCKET: ", {"Ref": "ExhibitorBucket"}, "\n S3_PREFIX: exhibitor"]} :param taupage_config: :return: """ def is_aws_fn(name): try: return name == "Ref" or (isinstance(name, str) and name.startswith("Fn::")) except: return False def transform(node): """Transform AWS functions and refs into an string representation for later split and substitution""" if isinstance(node, dict): num_keys = len(node) if num_keys > 0: key = next(iter(node.keys())) if num_keys == 1 and is_aws_fn(key): return "".join(["{{ ", json.dumps(node), " }}"]) else: return {key: transform(value) for key, value in node.items()} else: return node elif isinstance(node, list): return [transform(subnode) for subnode in node] else: return node def split(text): """Splits yaml text into text and AWS functions/refs""" parts = [] last_pos = 0 for m in _AWS_FN_RE.finditer(text): parts += [text[last_pos:m.start(1)], json.loads(m.group(2))] last_pos = m.end(1) parts += [text[last_pos:]] return parts yaml_text = yaml.dump(transform(taupage_config), width=sys.maxsize, default_flow_style=False) parts = split("#taupage-ami-config\n" + yaml_text) if len(parts) == 1: return parts[0] else: return {"Fn::Join": ["", parts]} PK!E5GCŭ6senza/components/weighted_dns_elastic_load_balancer.py from senza.components.elastic_load_balancer import component_elastic_load_balancer def component_weighted_dns_elastic_load_balancer(definition, configuration, args, info, force, account_info): if 'Domains' not in configuration: if 'MainDomain' in configuration: main_domain = configuration['MainDomain'] main_subdomain, main_zone = main_domain.split('.', 1) del configuration['MainDomain'] else: main_zone = 
account_info.Domain main_subdomain = info['StackName'] if 'VersionDomain' in configuration: version_domain = configuration['VersionDomain'] version_subdomain, version_zone = version_domain.split('.', 1) del configuration['VersionDomain'] else: version_zone = account_info.Domain version_subdomain = '{}-{}'.format(info['StackName'], info['StackVersion']) configuration['Domains'] = {'MainDomain': {'Type': 'weighted', 'Zone': '{}.'.format(main_zone.rstrip('.')), 'Subdomain': main_subdomain}, 'VersionDomain': {'Type': 'standalone', 'Zone': '{}.'.format(version_zone.rstrip('.')), 'Subdomain': version_subdomain}} return component_elastic_load_balancer(definition, configuration, args, info, force, account_info) PK}[Go ,senza/components/stups_auto_configuration.pyimport boto3 from senza.components.configuration import component_configuration from senza.utils import ensure_keys from senza.aws import get_tag def find_taupage_image(region: str): '''Find the latest Taupage AMI, first try private images, fallback to public''' ec2 = boto3.resource('ec2', region) filters = [{'Name': 'name', 'Values': ['*Taupage-AMI-*']}, {'Name': 'is-public', 'Values': ['false']}, {'Name': 'state', 'Values': ['available']}, {'Name': 'root-device-type', 'Values': ['ebs']}] images = list(ec2.images.filter(Filters=filters)) if not images: public_filters = [{'Name': 'name', 'Values': ['*Taupage-Public-AMI-*']}, {'Name': 'is-public', 'Values': ['true']}, {'Name': 'state', 'Values': ['available']}, {'Name': 'root-device-type', 'Values': ['ebs']}] images = list(ec2.images.filter(Filters=public_filters)) if not images: raise Exception('No Taupage AMI found') most_recent_image = sorted(images, key=lambda i: i.name)[-1] return most_recent_image def component_stups_auto_configuration(definition, configuration, args, info, force, account_info): ec2 = boto3.resource('ec2', args.region) availability_zones = configuration.get('AvailabilityZones') server_subnets = [] lb_subnets = [] lb_internal_subnets = [] for 
subnet in ec2.subnets.filter(Filters=[{'Name': 'vpc-id', 'Values': [account_info.VpcID]}]): name = get_tag(subnet.tags, 'Name', '') if availability_zones and subnet.availability_zone not in availability_zones: # skip subnet as it's not in one of the given AZs continue if 'dmz' in name: lb_subnets.append(subnet.id) elif 'internal' in name: lb_internal_subnets.append(subnet.id) server_subnets.append(subnet.id) else: server_subnets.append(subnet.id) if not lb_subnets: # no DMZ subnets were found, just use the same set for both LB and instances lb_subnets = server_subnets configuration = ensure_keys(configuration, "ServerSubnets", args.region) configuration["ServerSubnets"][args.region] = server_subnets configuration = ensure_keys(configuration, "LoadBalancerSubnets", args.region) configuration["LoadBalancerSubnets"][args.region] = lb_subnets configuration = ensure_keys(configuration, "LoadBalancerInternalSubnets", args.region) configuration["LoadBalancerInternalSubnets"][args.region] = lb_internal_subnets most_recent_image = find_taupage_image(args.region) configuration = ensure_keys(configuration, "Images", 'LatestTaupageImage', args.region) configuration["Images"]['LatestTaupageImage'][args.region] = most_recent_image.id component_configuration(definition, configuration, args, info, force, account_info) return definition PKNG<՜  !senza/components/configuration.py from senza.utils import ensure_keys, named_value def format_params(args): items = [(key, val) for key, val in args.__dict__.items() if key not in ('region', 'version')] return ', '.join(['{}: {}'.format(key, val) for key, val in items]) def get_default_description(info, args): return '{} ({})'.format(info['StackName'].title().replace('-', ' '), format_params(args)) def component_configuration(definition, configuration, args, info, force, account_info): # define parameters # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html if "Parameters" in info: definition = 
def component_auto_scaling_group(definition, configuration, args, info, force, account_info):
    """Add a LaunchConfiguration and AutoScalingGroup (plus optional IAM
    instance profile and scaling policies) to the CloudFormation definition.

    Reads from ``configuration``: Name, InstanceType, Image, and optional
    AssociatePublicIpAddress, EbsOptimized, BlockDeviceMappings,
    IamInstanceProfile, IamRoles, SecurityGroups, UserData,
    ElasticLoadBalancer, HealthCheckType, HealthCheckGracePeriod, AutoScaling.
    Returns the mutated ``definition``.
    """
    definition = ensure_keys(definition, "Resources")

    # launch configuration
    config_name = configuration["Name"] + "Config"
    definition["Resources"][config_name] = {
        "Type": "AWS::AutoScaling::LaunchConfiguration",
        "Properties": {
            "InstanceType": configuration["InstanceType"],
            # AMI is resolved per-region via the "Images" mapping built by the
            # configuration component
            "ImageId": {"Fn::FindInMap": ["Images", {"Ref": "AWS::Region"}, configuration["Image"]]},
            "AssociatePublicIpAddress": configuration.get('AssociatePublicIpAddress', False),
            "EbsOptimized": configuration.get('EbsOptimized', False)
        }
    }

    if 'BlockDeviceMappings' in configuration:
        definition['Resources'][config_name]['Properties']['BlockDeviceMappings'] = configuration['BlockDeviceMappings']

    if "IamInstanceProfile" in configuration:
        definition["Resources"][config_name]["Properties"]["IamInstanceProfile"] = configuration["IamInstanceProfile"]

    if 'IamRoles' in configuration:
        # build an instance profile from the given roles; overrides any
        # IamInstanceProfile set above
        logical_id = configuration['Name'] + 'InstanceProfile'
        roles = configuration['IamRoles']
        if len(roles) > 1:
            # multiple roles can only be combined by merging their policies
            # into one synthetic role, which is impossible for {"Ref": ..}
            # references we cannot introspect
            for role in roles:
                if isinstance(role, dict):
                    raise click.UsageError('Cannot merge policies of Cloud Formation references ({"Ref": ".."}): ' +
                                           'You can use at most one IAM role with "Ref".')
            logical_role_id = configuration['Name'] + 'Role'
            definition['Resources'][logical_role_id] = {
                'Type': 'AWS::IAM::Role',
                'Properties': {
                    "AssumeRolePolicyDocument": {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Principal": {
                                    "Service": ["ec2.amazonaws.com"]
                                },
                                "Action": ["sts:AssumeRole"]
                            }
                        ]
                    },
                    'Path': '/',
                    'Policies': get_merged_policies(roles)
                }
            }
            instance_profile_roles = [{'Ref': logical_role_id}]
        else:
            instance_profile_roles = roles
        definition['Resources'][logical_id] = {
            'Type': 'AWS::IAM::InstanceProfile',
            'Properties': {
                'Path': '/',
                'Roles': instance_profile_roles
            }
        }
        definition["Resources"][config_name]["Properties"]["IamInstanceProfile"] = {'Ref': logical_id}

    if "SecurityGroups" in configuration:
        definition["Resources"][config_name]["Properties"]["SecurityGroups"] = \
            resolve_security_groups(configuration["SecurityGroups"], args.region)

    if "UserData" in configuration:
        definition["Resources"][config_name]["Properties"]["UserData"] = {
            "Fn::Base64": configuration["UserData"]
        }

    # auto scaling group
    asg_name = configuration["Name"]
    definition["Resources"][asg_name] = {
        "Type": "AWS::AutoScaling::AutoScalingGroup",
        # wait up to 15 minutes to get a signal from at least one server that it booted
        "CreationPolicy": {
            "ResourceSignal": {
                "Count": "1",
                "Timeout": "PT15M"
            }
        },
        "Properties": {
            # for our operator some notifications
            "LaunchConfigurationName": {"Ref": config_name},
            "VPCZoneIdentifier": {"Fn::FindInMap": ["ServerSubnets", {"Ref": "AWS::Region"}, "Subnets"]},
            "Tags": [
                # Tag "Name"
                {
                    "Key": "Name",
                    "PropagateAtLaunch": True,
                    "Value": "{0}-{1}".format(info["StackName"], info["StackVersion"])
                },
                # Tag "StackName"
                {
                    "Key": "StackName",
                    "PropagateAtLaunch": True,
                    "Value": info["StackName"],
                },
                # Tag "StackVersion"
                {
                    "Key": "StackVersion",
                    "PropagateAtLaunch": True,
                    "Value": info["StackVersion"]
                }
            ]
        }
    }

    if "OperatorTopicId" in info:
        # notify the operator topic about instance lifecycle events
        definition["Resources"][asg_name]["Properties"]["NotificationConfiguration"] = {
            "NotificationTypes": [
                "autoscaling:EC2_INSTANCE_LAUNCH",
                "autoscaling:EC2_INSTANCE_LAUNCH_ERROR",
                "autoscaling:EC2_INSTANCE_TERMINATE",
                "autoscaling:EC2_INSTANCE_TERMINATE_ERROR"
            ],
            "TopicARN": resolve_topic_arn(args.region, info["OperatorTopicId"])
        }

    default_health_check_type = 'EC2'

    if "ElasticLoadBalancer" in configuration:
        # accept either one ELB logical id (str) or a list of them
        if isinstance(configuration["ElasticLoadBalancer"], str):
            definition["Resources"][asg_name]["Properties"]["LoadBalancerNames"] = [
                {"Ref": configuration["ElasticLoadBalancer"]}]
        elif isinstance(configuration["ElasticLoadBalancer"], list):
            definition["Resources"][asg_name]["Properties"]["LoadBalancerNames"] = []
            for ref in configuration["ElasticLoadBalancer"]:
                definition["Resources"][asg_name]["Properties"]["LoadBalancerNames"].append({'Ref': ref})
        # use ELB health check by default
        default_health_check_type = 'ELB'

    definition["Resources"][asg_name]['Properties']['HealthCheckType'] = \
        configuration.get('HealthCheckType', default_health_check_type)
    definition["Resources"][asg_name]['Properties']['HealthCheckGracePeriod'] = \
        configuration.get('HealthCheckGracePeriod', 300)

    if "AutoScaling" in configuration:
        definition["Resources"][asg_name]["Properties"]["MaxSize"] = configuration["AutoScaling"]["Maximum"]
        definition["Resources"][asg_name]["Properties"]["MinSize"] = configuration["AutoScaling"]["Minimum"]

        # ScaleUp policy
        definition["Resources"][asg_name + "ScaleUp"] = {
            "Type": "AWS::AutoScaling::ScalingPolicy",
            "Properties": {
                "AdjustmentType": "ChangeInCapacity",
                "ScalingAdjustment": "1",
                "Cooldown": "60",
                "AutoScalingGroupName": {
                    "Ref": asg_name
                }
            }
        }

        # ScaleDown policy
        definition["Resources"][asg_name + "ScaleDown"] = {
            "Type": "AWS::AutoScaling::ScalingPolicy",
            "Properties": {
                "AdjustmentType": "ChangeInCapacity",
                "ScalingAdjustment": "-1",
                "Cooldown": "60",
                "AutoScalingGroupName": {
                    "Ref": asg_name
                }
            }
        }

        # dispatch to a module-level metric_<type> function (e.g. metric_cpu)
        # that wires the alarms to the scaling policies above
        metric_type = configuration["AutoScaling"]["MetricType"]
        metricfn = globals().get('metric_{}'.format(metric_type.lower()))
        if not metricfn:
            raise click.UsageError('Auto scaling MetricType "{}" not supported.'.format(metric_type))
        definition = metricfn(asg_name, definition, configuration["AutoScaling"], args, info, force)
    else:
        # no auto scaling configured: pin the group to exactly one instance
        definition["Resources"][asg_name]["Properties"]["MaxSize"] = 1
        definition["Resources"][asg_name]["Properties"]["MinSize"] = 1

    return definition
def component_redis_cluster(definition, configuration, args, info, force, account_info):
    """Add a replicated (HA) ElastiCache Redis cluster to the definition.

    Creates an AWS::ElastiCache::ReplicationGroup with automatic failover
    plus the subnet group it lives in. Reads from ``configuration``: Name,
    SecurityGroups, and optional NumberOfNodes, CacheNodeType,
    EngineVersion, CacheParameterGroupName. Returns the mutated
    ``definition``.
    """
    name = configuration["Name"]
    definition = ensure_keys(definition, "Resources")
    number_of_nodes = int(configuration.get('NumberOfNodes', '2'))
    definition["Resources"]["RedisReplicationGroup"] = {
        "Type": "AWS::ElastiCache::ReplicationGroup",
        "Properties": {
            "AutomaticFailoverEnabled": True,
            # BUGFIX: "CacheNodeType" appeared twice in this dict literal;
            # the duplicate key has been removed (values were identical)
            "CacheNodeType": configuration.get('CacheNodeType', 'cache.t2.small'),
            "CacheSubnetGroupName": {
                "Ref": "RedisSubnetGroup"
            },
            "Engine": "redis",
            "EngineVersion": configuration.get('EngineVersion', '2.8.19'),
            "CacheParameterGroupName": configuration.get('CacheParameterGroupName', 'default.redis2.8'),
            "NumCacheClusters": number_of_nodes,
            "SecurityGroupIds": resolve_security_groups(configuration["SecurityGroups"], args.region),
            "ReplicationGroupDescription": "Redis replicated cache cluster: " + name,
        }
    }
    definition["Resources"]["RedisSubnetGroup"] = {
        "Type": "AWS::ElastiCache::SubnetGroup",
        "Properties": {
            "Description": "Redis cluster subnet group",
            # reuse the per-region server subnets resolved by the
            # configuration component
            "SubnetIds": {"Fn::FindInMap": ["ServerSubnets", {"Ref": "AWS::Region"}, "Subnets"]}
        }
    }
    return definition
def get_load_balancer_name(stack_name: str, stack_version: str):
    """Build the ELB name ``<stack>-<version>``, shortening the stack name
    so the whole result stays within the 32 character AWS limit.

    >>> get_load_balancer_name('a', '1')
    'a-1'
    >>> get_load_balancer_name('toolong123456789012345678901234567890', '1')
    'toolong12345678901234567890123-1'
    """
    # keep the version suffix intact and trim the stack name instead
    max_stack_len = 32 - len(stack_version) - 1
    return '{}-{}'.format(stack_name[:max_stack_len], stack_version)
"{}"'.format(pattern)) health_check_protocol = "HTTP" allowed_health_check_protocols = ("HTTP", "TCP", "UDP", "SSL") if "HealthCheckProtocol" in configuration: health_check_protocol = configuration["HealthCheckProtocol"] if health_check_protocol not in allowed_health_check_protocols: raise click.UsageError('Protocol "{}" is not supported for LoadBalancer'.format(health_check_protocol)) health_check_path = "/ui/" if "HealthCheckPath" in configuration: health_check_path = configuration["HealthCheckPath"] health_check_port = configuration["HTTPPort"] if "HealthCheckPort" in configuration: health_check_port = configuration["HealthCheckPort"] health_check_target = "{0}:{1}{2}".format(health_check_protocol, health_check_port, health_check_path) if configuration.get('NameSufix'): loadbalancer_name = get_load_balancer_name(info["StackName"], '{}-{}'.format(info["StackVersion"], configuration['NameSufix'])) del(configuration['NameSufix']) else: loadbalancer_name = get_load_balancer_name(info["StackName"], info["StackVersion"]) loadbalancer_scheme = "internal" allowed_loadbalancer_schemes = ("internet-facing", "internal") if "Scheme" in configuration: loadbalancer_scheme = configuration["Scheme"] else: configuration["Scheme"] = loadbalancer_scheme if loadbalancer_scheme == 'internet-facing': click.secho('You are deploying an internet-facing ELB that will be publicly accessible! 
def get_component(componenttype: str):
    """Resolve a component function from its type name (e.g. "Senza::MyComponent").

    The prefix selects the root package and the remainder selects the
    module under ``<package>.components``; the callable is expected to be
    named ``component_<module>``. Returns None when no such module exists.
    """
    namespace, _, type_name = componenttype.partition('::')
    package = camel_case_to_underscore(namespace)
    module_suffix = camel_case_to_underscore(type_name)
    module_path = '{}.components.{}'.format(package, module_suffix)
    try:
        component_module = importlib.import_module(module_path)
    except ImportError:
        # component (module) not found
        return None
    return getattr(component_module, 'component_{}'.format(module_suffix))
{{/docker_image}} Tags: - SpiloCluster: "{{=<% %>=}}{{Arguments.version}}<%={{ }}=%>" # a list of senza components to apply to the definition SenzaComponents: # this basic configuration is required for the other components - Configuration: Type: Senza::StupsAutoConfiguration # auto-detect network setup # will create a launch configuration and auto scaling group with scaling triggers - AppServer: Type: Senza::TaupageAutoScalingGroup AutoScaling: Minimum: 3 Maximum: 3 MetricType: CPU InstanceType: {{instance_type}} {{#ebs_optimized}} EbsOptimized: True {{/ebs_optimized}} BlockDeviceMappings: - DeviceName: /dev/xvdk {{#use_ebs}} Ebs: VolumeSize: {{volume_size}} VolumeType: {{volume_type}} {{#snapshot_id}} SnapshotId: {{snapshot_id}} {{/snapshot_id}} {{#volume_iops}} Iops: {{volume_iops}} {{/volume_iops}} {{/use_ebs}} {{^use_ebs}} VirtualName: ephemeral0 {{/use_ebs}} ElasticLoadBalancer: - PostgresLoadBalancer - PostgresReplicaLoadBalancer HealthCheckType: EC2 SecurityGroups: - app-spilo IamRoles: - Ref: PostgresAccessRole AssociatePublicIpAddress: false # change for standalone deployment in default VPC TaupageConfig: runtime: Docker {{#docker_image}} source: {{docker_image}} {{/docker_image}} {{^docker_image}} source: "{{=<% %>=}}{{Arguments.ImageVersion}}<%={{ }}=%>" {{/docker_image}} ports: {{postgres_port}}: {{postgres_port}} {{healthcheck_port}}: {{healthcheck_port}} etcd_discovery_domain: "{{discovery_domain}}" environment: SCOPE: "{{=<% %>=}}{{Arguments.version}}<%={{ }}=%>" ETCD_DISCOVERY_DOMAIN: "{{discovery_domain}}" WAL_S3_BUCKET: "{{wal_s3_bucket}}" root: True mounts: /home/postgres/pgdata: partition: /dev/xvdk filesystem: {{fstype}} erase_on_boot: true options: {{fsoptions}} {{#scalyr_account_key}} scalyr_account_key: {{scalyr_account_key}} {{/scalyr_account_key}} Resources: PostgresReplicaRoute53Record: Type: AWS::Route53::RecordSet Properties: Type: CNAME TTL: 20 HostedZoneName: {{hosted_zone}} Name: "{{=<% %>=}}{{Arguments.version}}<%={{ 
def ebs_optimized_supported(instance_type):
    """Return True when the given EC2 instance type supports EBS optimization.

    Based on http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html
    """
    ebs_optimized_types = frozenset((
        'c1.large',
        'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge',
        'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge',
        'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge',
        'g2.2xlarge',
        'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge',
        'm1.large', 'm1.xlarge',
        'm2.2xlarge', 'm2.4xlarge',
        'm3.xlarge', 'm3.2xlarge',
        'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge',
    ))
    return instance_type in ebs_optimized_types
format(pio_max), default=str(pio_max)) prompt(variables, "snapshot_id", "ID of the snapshot to populate EBS volume from", default="") if ebs_optimized_supported(variables['instance_type']): variables['ebs_optimized'] = True prompt(variables, "fstype", "Filesystem for the data partition", default="ext4") prompt(variables, "fsoptions", "Filesystem mount options (comma-separated)", default="noatime,nodiratime,nobarrier") prompt(variables, "scalyr_account_key", "Account key for your scalyr account", "") variables['postgres_port'] = POSTGRES_PORT variables['healthcheck_port'] = HEALTHCHECK_PORT sg_name = 'app-spilo' rules_missing = check_security_group(sg_name, [('tcp', 22), ('tcp', POSTGRES_PORT), ('tcp', HEALTHCHECK_PORT)], region, allow_from_self=True) if ('tcp', 22) in rules_missing: warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'. format(sg_name)) if ('tcp', POSTGRES_PORT) in rules_missing: error('Security group {} does not allow inbound TCP traffic on the default postgres port ({})'.format( sg_name, POSTGRES_PORT )) if ('tcp', HEALTHCHECK_PORT) in rules_missing: error('Security group {} does not allow inbound TCP traffic on the default health check port ({})'. 
def get_latest_spilo_image(registry_url='https://registry.opensource.zalan.do',
                           address='/teams/acid/artifacts/spilo-9.4/tags'):
    """Query the Docker registry for the most recent Spilo image tag.

    Tags are ordered by creation date (newest first); a non-SNAPSHOT tag is
    preferred, falling back to the newest tag of any kind. Returns the full
    ``image:tag`` string, or an empty string when the registry is
    unreachable or has no tags at all.
    """
    try:
        # bounded timeout so an unreachable registry cannot hang the CLI
        r = requests.get(registry_url + address, timeout=5)
        if r.ok:
            latest = None
            for entry in sorted(r.json(), key=lambda t: t['created'], reverse=True):
                tag = entry['name']
                # try to avoid snapshots if possible
                if 'SNAPSHOT' not in tag:
                    latest = tag
                    break
                # remember the newest tag of any kind as fallback
                latest = latest or tag
            # BUGFIX: an empty tag list used to produce "<image>:None";
            # return "" instead so the caller prompts for a version
            if latest:
                return "{0}:{1}".format(SPILO_IMAGE_ADDRESS, latest)
    except Exception:
        # BUGFIX: was a bare "except:" which also swallowed
        # KeyboardInterrupt/SystemExit; best effort only, fall through
        pass
    return ""
# a list of senza components to apply to the definition SenzaComponents: # this basic configuration is required for the other components - Configuration: Type: Senza::StupsAutoConfiguration # auto-detect network setup # will create a launch configuration and auto scaling group with scaling triggers - AppServer: Type: Senza::TaupageAutoScalingGroup InstanceType: {{ instance_type }} SecurityGroups: - app-{{application_id}} IamRoles: - app-{{application_id}} ElasticLoadBalancer: AppLoadBalancer AssociatePublicIpAddress: false # change for standalone deployment in default VPC TaupageConfig: application_version: "{{=<% %>=}}{{Arguments.ImageVersion}}<%={{ }}=%>" runtime: Docker source: "{{ docker_image }}:{{=<% %>=}}{{Arguments.ImageVersion}}<%={{ }}=%>" health_check_path: {{http_health_check_path}} ports: {{http_port}}: {{http_port}} {{#mint_bucket}} mint_bucket: "{{ mint_bucket }}" {{/mint_bucket}} # creates an ELB entry and Route53 domains to this ELB - AppLoadBalancer: Type: Senza::WeightedDnsElasticLoadBalancer HTTPPort: {{http_port}} HealthCheckPath: {{http_health_check_path}} SecurityGroups: - app-{{application_id}}-lb Scheme: {{loadbalancer_scheme}} ''' def gather_user_variables(variables, region, account_info): # maximal 32 characters because of the loadbalancer-name prompt(variables, 'application_id', 'Application ID', default='hello-world', value_proc=check_value(60, '^[a-zA-Z][-a-zA-Z0-9]*$')) prompt(variables, 'docker_image', 'Docker image without tag/version (e.g. 
def prompt(variables: dict, var_name, *args, **kwargs):
    """Ask the user for *var_name* unless *variables* already contains it.

    A callable ``default`` keyword is evaluated lazily, so expensive
    defaults are only computed when the question is actually asked.
    The answer is stored in ``variables[var_name]``.
    """
    if var_name in variables:
        return
    default = kwargs.get('default')
    if callable(default):
        # resolve the lazy default just before prompting
        kwargs['default'] = default()
    variables[var_name] = click.prompt(*args, **kwargs)
def get_iam_role_policy(application_id: str, bucket_name: str, region: str):
    """Build the IAM policy document granting the application read-only
    access to its own folder inside the mint S3 bucket."""
    mint_objects_arn = "arn:aws:s3:::{}/{}/*".format(bucket_name, application_id)
    read_statement = {
        "Sid": "AllowMintRead",
        "Effect": "Allow",
        "Action": [
            "s3:GetObject"
        ],
        "Resource": [
            mint_objects_arn
        ]
    }
    return {
        "Version": "2012-10-17",
        "Statement": [read_statement]
    }
AssumeRolePolicyDocument=json.dumps(assume_role_policy_document)) update_policy = bucket_name is not None and (not exists or click.confirm('IAM role {} already exists. '.format(role_name) + 'Do you want Senza to overwrite the role policy?')) if update_policy: with Action('Updating IAM role policy of {}..'.format(role_name)): policy = get_iam_role_policy(application_id, bucket_name, region) iam.put_role_policy(RoleName=role_name, PolicyName=role_name, PolicyDocument=json.dumps(policy)) def check_s3_bucket(bucket_name: str, region: str): s3 = boto3.resource('s3', region) with Action("Checking S3 bucket {}..".format(bucket_name)): exists = False try: s3.meta.client.head_bucket(Bucket=bucket_name) exists = True except: pass if not exists: with Action("Creating S3 bucket {}...".format(bucket_name)): s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) PK!E5Gv:senza/templates/rediscluster.py''' Elasticache cluster running multiple redis nodes, with replication / HA ''' from clickclick import warning from senza.utils import pystache_render from ._helper import prompt, check_security_group, check_value TEMPLATE = ''' # basic information for generating and executing this definition SenzaInfo: StackName: {{ application_id }} # a list of senza components to apply to the definition SenzaComponents: # this basic configuraation is required for the other components - Configuration: Type: Senza::StupsAutoConfiguration # auto-detect network setup - {{ application_id }}: Type: Senza::RedisCluster CacheNodeType: {{ instance_type }} NumberOfNodes: {{ number_of_nodes }} SecurityGroups: - redis-{{ application_id }} ''' def gather_user_variables(variables, region, account_info): # maximal 32 characters because of the loadbalancer-name prompt(variables, 'application_id', 'Application ID', default='hello-world', value_proc=check_value(18, '^[a-zA-Z][-a-zA-Z0-9]*$')) prompt(variables, 'instance_type', 'EC2 instance type', default='cache.m3.medium') 
prompt(variables, 'number_of_nodes', 'Number of nodes in cluster', default='2', value_proc=check_value(1, '^[2-5]$')) sg_name = 'redis-{}'.format(variables['application_id']) rules_missing = check_security_group(sg_name, [('tcp', 6379)], region, allow_from_self=True) if ('tcp', 6379) in rules_missing: warning('Security group {} does not allow tcp/6379 access yet, you will not be able to access redis'.format( sg_name)) return variables def generate_definition(variables): definition_yaml = pystache_render(TEMPLATE, variables) return definition_yaml PK!E5GfM M senza/templates/bgapp.py''' Background app with single EC2 instance ''' from clickclick import warning from senza.utils import pystache_render from ._helper import prompt, confirm, check_security_group, check_iam_role, get_mint_bucket_name, check_value TEMPLATE = ''' # basic information for generating and executing this definition SenzaInfo: StackName: {{application_id}} Parameters: - ImageVersion: Description: "Docker image version of {{ application_id }}." 
# a list of senza components to apply to the definition SenzaComponents: # this basic configuration is required for the other components - Configuration: Type: Senza::StupsAutoConfiguration # auto-detect network setup # will create a launch configuration and auto scaling group with scaling triggers - AppServer: Type: Senza::TaupageAutoScalingGroup InstanceType: {{ instance_type }} SecurityGroups: - app-{{application_id}} IamRoles: - app-{{application_id}} AssociatePublicIpAddress: false # change for standalone deployment in default VPC TaupageConfig: application_version: "{{=<% %>=}}{{Arguments.ImageVersion}}<%={{ }}=%>" runtime: Docker source: "{{ docker_image }}:{{=<% %>=}}{{Arguments.ImageVersion}}<%={{ }}=%>" {{#mint_bucket}} mint_bucket: "{{ mint_bucket }}" {{/mint_bucket}} ''' def gather_user_variables(variables, region, account_info): prompt(variables, 'application_id', 'Application ID', default='hello-world', value_proc=check_value(60, '^[a-zA-Z][-a-zA-Z0-9]*$')) prompt(variables, 'docker_image', 'Docker image without tag/version (e.g. 
"pierone.example.org/myteam/myapp")', default='stups/hello-world') prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro') if 'pierone' in variables['docker_image'] or confirm('Did you need OAuth-Credentials from Mint?'): prompt(variables, 'mint_bucket', 'Mint S3 bucket name', default=lambda: get_mint_bucket_name(region)) else: variables['mint_bucket'] = None sg_name = 'app-{}'.format(variables['application_id']) rules_missing = check_security_group(sg_name, [('tcp', 22)], region, allow_from_self=True) if ('tcp', 22) in rules_missing: warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.format( sg_name)) check_iam_role(variables['application_id'], variables['mint_bucket'], region) return variables def generate_definition(variables): definition_yaml = pystache_render(TEMPLATE, variables) return definition_yaml PKbFsenza/templates/__init__.pyPK!E5Gu&&senza/templates/redisnode.py''' Elasticache node running redis, without replication / HA (for caching) ''' from clickclick import warning from senza.utils import pystache_render from ._helper import prompt, check_security_group, check_value TEMPLATE = ''' # basic information for generating and executing this definition SenzaInfo: StackName: {{ application_id }} # a list of senza components to apply to the definition SenzaComponents: # this basic configuraation is required for the other components - Configuration: Type: Senza::StupsAutoConfiguration # auto-detect network setup - {{ application_id }}: Type: Senza::RedisNode CacheNodeType: {{ instance_type }} SecurityGroups: - redis-{{ application_id }} ''' def gather_user_variables(variables, region, account_info): # maximal 32 characters because of the loadbalancer-name prompt(variables, 'application_id', 'Application ID', default='hello-world', value_proc=check_value(18, '^[a-zA-Z][-a-zA-Z0-9]*$')) prompt(variables, 'instance_type', 'EC2 instance type', default='cache.t2.small') sg_name = 
'redis-{}'.format(variables['application_id']) rules_missing = check_security_group(sg_name, [('tcp', 6379)], region, allow_from_self=True) if ('tcp', 6379) in rules_missing: warning('Security group {} does not allow tcp/6379 access, you will not be able to access your redis'.format( sg_name)) return variables def generate_definition(variables): definition_yaml = pystache_render(TEMPLATE, variables) return definition_yaml PK~[G:@@*stups_senza-0.89.dist-info/DESCRIPTION.rst===== Senza ===== .. image:: https://travis-ci.org/zalando-stups/senza.svg?branch=master :target: https://travis-ci.org/zalando-stups/senza :alt: Build Status .. image:: https://coveralls.io/repos/zalando-stups/senza/badge.svg :target: https://coveralls.io/r/zalando-stups/senza :alt: Code Coverage .. image:: https://img.shields.io/pypi/dw/stups-senza.svg :target: https://pypi.python.org/pypi/stups-senza/ :alt: PyPI Downloads .. image:: https://img.shields.io/pypi/v/stups-senza.svg :target: https://pypi.python.org/pypi/stups-senza/ :alt: Latest PyPI version .. image:: https://img.shields.io/pypi/l/stups-senza.svg :target: https://pypi.python.org/pypi/stups-senza/ :alt: License Senza is a command line tool for generating and executing AWS Cloud Formation templates in a sane way. It supports Cloud Formation templates as YAML input and adds own 'components' on top. Components are predefined Cloud Formation snippets that are easy to configure and generate all the boilerplate JSON that is required by Cloud Formation. Installation ============ .. code-block:: bash $ sudo pip3 install --upgrade stups-senza Usage ===== .. code-block:: bash $ senza init my-definition.yaml # bootstrap a new app $ senza create ./my-definition.yaml 1 1.0 Please read the `STUPS documentation on Senza`_ to learn more. Senza Definition ================ .. 
code-block:: yaml # basic information for generating and executing this definition SenzaInfo: StackName: kio OperatorTopicId: kio-operators Parameters: - ImageVersion: Description: "Docker image version of Kio." # a list of senza components to apply to the definition SenzaComponents: - Configuration: Type: Senza::StupsAutoConfiguration # auto-detect network setup # will create a launch configuration and auto scaling group with min/max=1 - AppServer: Type: Senza::TaupageAutoScalingGroup InstanceType: t2.micro SecurityGroups: [app-kio] # can be either name or id ("sg-..") ElasticLoadBalancer: AppLoadBalancer TaupageConfig: runtime: Docker source: stups/kio:{{Arguments.ImageVersion}} ports: 8080: 8080 environment: PGSSLMODE: verify-full DB_SUBNAME: "//kio.example.eu-west-1.rds.amazonaws.com:5432/kio?ssl=true" DB_USER: kio DB_PASSWORD: aws:kms:abcdef1234567890abcdef= # creates an ELB entry and Route53 domains to this ELB - AppLoadBalancer: Type: Senza::WeightedDnsElasticLoadBalancer HTTPPort: 8080 HealthCheckPath: /ui/ SecurityGroups: [app-kio-lb] Scheme: internet-facing # just plain Cloud Formation definitions are fully supported: Outputs: URL: Description: "The ELB URL of the new Kio deployment." Value: "Fn::Join": - "" - - "http://" - "Fn::GetAtt": - AppLoadBalancer - DNSName During evaluation, you can mustache templating with access to the rendered definition, including the SenzaInfo, SenzaComponents and Arguments key (containing all given arguments). See the `STUPS documentation on Senza`_ for details. .. _STUPS documentation on Senza: http://stups.readthedocs.org/en/latest/components/senza.html Unit Tests ========== .. code-block:: bash $ python3 setup.py test --cov-html=true Releasing ========= .. 
code-block:: bash $ ./release.sh PK~[GX**+stups_senza-0.89.dist-info/entry_points.txt[console_scripts] senza = senza.cli:main PK~[GB\44(stups_senza-0.89.dist-info/metadata.json{"classifiers": ["Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: Developers", "Intended Audience :: System Administrators", "License :: OSI Approved :: Apache Software License", "Operating System :: POSIX :: Linux", "Programming Language :: Python", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython"], "extensions": {"python.commands": {"wrap_console": {"senza": "senza.cli:main"}}, "python.details": {"contacts": [{"email": "henning.jacobs@zalando.de", "name": "Henning Jacobs", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/zalando-stups/senza"}}, "python.exports": {"console_scripts": {"senza": "senza.cli:main"}}}, "extras": [], "generator": "bdist_wheel (0.26.0)", "keywords": ["aws", "cloud", "formation", "cf", "elb", "ec2", "stups", "immutable", "stacks", "route53", "boto"], "license": "Apache License 2.0", "metadata_version": "2.0", "name": "stups-senza", "run_requires": [{"requires": ["PyYAML", "boto3 (>=1.1.3)", "botocore (>=1.2.1)", "clickclick (>=0.14)", "dnspython3", "pystache", "pytest (>=2.7.3)", "stups-pierone (>=0.7)"]}], "summary": "AWS Cloud Formation deployment CLI", "test_requires": [{"requires": ["pytest", "pytest-cov"]}], "version": "0.89"}PKvd.G7//#stups_senza-0.89.dist-info/pbr.json{"git_version": "3d9e269", "is_release": false}PK~[G s(stups_senza-0.89.dist-info/top_level.txtsenza PK~[G}\\ stups_senza-0.89.dist-info/WHEELWheel-Version: 1.0 Generator: bdist_wheel (0.26.0) Root-Is-Purelib: true Tag: py3-none-any PK~[GlDD#stups_senza-0.89.dist-info/METADATAMetadata-Version: 2.0 Name: stups-senza Version: 0.89 Summary: AWS Cloud Formation deployment CLI Home-page: https://github.com/zalando-stups/senza Author: Henning 
Jacobs Author-email: henning.jacobs@zalando.de License: Apache License 2.0 Keywords: aws cloud formation cf elb ec2 stups immutable stacks route53 boto Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: Console Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: Implementation :: CPython Requires-Dist: PyYAML Requires-Dist: boto3 (>=1.1.3) Requires-Dist: botocore (>=1.2.1) Requires-Dist: clickclick (>=0.14) Requires-Dist: dnspython3 Requires-Dist: pystache Requires-Dist: pytest (>=2.7.3) Requires-Dist: stups-pierone (>=0.7) ===== Senza ===== .. image:: https://travis-ci.org/zalando-stups/senza.svg?branch=master :target: https://travis-ci.org/zalando-stups/senza :alt: Build Status .. image:: https://coveralls.io/repos/zalando-stups/senza/badge.svg :target: https://coveralls.io/r/zalando-stups/senza :alt: Code Coverage .. image:: https://img.shields.io/pypi/dw/stups-senza.svg :target: https://pypi.python.org/pypi/stups-senza/ :alt: PyPI Downloads .. image:: https://img.shields.io/pypi/v/stups-senza.svg :target: https://pypi.python.org/pypi/stups-senza/ :alt: Latest PyPI version .. image:: https://img.shields.io/pypi/l/stups-senza.svg :target: https://pypi.python.org/pypi/stups-senza/ :alt: License Senza is a command line tool for generating and executing AWS Cloud Formation templates in a sane way. It supports Cloud Formation templates as YAML input and adds own 'components' on top. Components are predefined Cloud Formation snippets that are easy to configure and generate all the boilerplate JSON that is required by Cloud Formation. Installation ============ .. code-block:: bash $ sudo pip3 install --upgrade stups-senza Usage ===== .. 
code-block:: bash $ senza init my-definition.yaml # bootstrap a new app $ senza create ./my-definition.yaml 1 1.0 Please read the `STUPS documentation on Senza`_ to learn more. Senza Definition ================ .. code-block:: yaml # basic information for generating and executing this definition SenzaInfo: StackName: kio OperatorTopicId: kio-operators Parameters: - ImageVersion: Description: "Docker image version of Kio." # a list of senza components to apply to the definition SenzaComponents: - Configuration: Type: Senza::StupsAutoConfiguration # auto-detect network setup # will create a launch configuration and auto scaling group with min/max=1 - AppServer: Type: Senza::TaupageAutoScalingGroup InstanceType: t2.micro SecurityGroups: [app-kio] # can be either name or id ("sg-..") ElasticLoadBalancer: AppLoadBalancer TaupageConfig: runtime: Docker source: stups/kio:{{Arguments.ImageVersion}} ports: 8080: 8080 environment: PGSSLMODE: verify-full DB_SUBNAME: "//kio.example.eu-west-1.rds.amazonaws.com:5432/kio?ssl=true" DB_USER: kio DB_PASSWORD: aws:kms:abcdef1234567890abcdef= # creates an ELB entry and Route53 domains to this ELB - AppLoadBalancer: Type: Senza::WeightedDnsElasticLoadBalancer HTTPPort: 8080 HealthCheckPath: /ui/ SecurityGroups: [app-kio-lb] Scheme: internet-facing # just plain Cloud Formation definitions are fully supported: Outputs: URL: Description: "The ELB URL of the new Kio deployment." Value: "Fn::Join": - "" - - "http://" - "Fn::GetAtt": - AppLoadBalancer - DNSName During evaluation, you can mustache templating with access to the rendered definition, including the SenzaInfo, SenzaComponents and Arguments key (containing all given arguments). See the `STUPS documentation on Senza`_ for details. .. _STUPS documentation on Senza: http://stups.readthedocs.org/en/latest/components/senza.html Unit Tests ========== .. code-block:: bash $ python3 setup.py test --cov-html=true Releasing ========= .. 
code-block:: bash $ ./release.sh PK~[G%k !stups_senza-0.89.dist-info/RECORDsenza/__init__.py,sha256=_Y276YImxnpV6-jburGFOdAf0TmzBZWD1pQgQsGBblE,21 senza/__main__.py,sha256=Bo-ai17xihpGrzXuajJcGOoywIOODbHcnNCxZO0IhNo,66 senza/aws.py,sha256=MBJmjdrz9YqcqF4c-sQ9tdQmSfGSMr_jO2HgiBaQ4M4,7572 senza/cli.py,sha256=KIN4E2sqIGZwo_p8uTysxcD9UkoIvtePD3O7Ytju2_w,46274 senza/docker.py,sha256=sUYC8OlxZlauckSWoz9wMI0XVahjnYQAhk4LrF_1LEU,775 senza/traffic.py,sha256=fJTLW50BkGEgVQzwUP1fYhY5msyo_7Kwdes6RFvz7JI,16098 senza/utils.py,sha256=CSpVwNklwh3_3bJ2S0ABHVpU3r2Ye5YRH-3irJFP8xI,731 senza/components/__init__.py,sha256=IfB-BRt6aeeOge3Zp4ISEKy77mWUEOvnYbW0QmtUE30,937 senza/components/auto_scaling_group.py,sha256=7DklLH15Mt7FUYgFM3wM1CljY8h_gogaLk_iRfayx58,9812 senza/components/configuration.py,sha256=cEQLSiMUasYJG2bZJ1nskrH6j9R75MFW63ArJ0RFUdQ,2306 senza/components/elastic_load_balancer.py,sha256=nBYsVacp_ZAcCfr63MGQjhFcI69-3KcxnZlcj2U1f3E,6299 senza/components/iam_role.py,sha256=bRgnNOUw0H3n03B-qOgccyE70ehNQcS8TY8hLsoLsT8,1378 senza/components/redis_cluster.py,sha256=0Mex8bT1k2EX0kXtMJQeFNbvF5xZ7LlCNvl5ysSyIq8,1556 senza/components/redis_node.py,sha256=8-UTw8gSKSKLhK76ZPRv7cZysz0Z8MqbWkOmc_AAJ4Q,1280 senza/components/stups_auto_configuration.py,sha256=X3JuRLfJmRAzUY0FxQf42-5Hz1d0Euhmjb4W7YkF6iA,2968 senza/components/taupage_auto_scaling_group.py,sha256=c1WsnGq4Jtsg15Mq9MklyLbvaWvefojfXlRep46AfIU,4841 senza/components/weighted_dns_elastic_load_balancer.py,sha256=zZ59D2TriWBW0dHx80yR2CxHjYgNJSG-FZu2HRJceoo,1540 senza/templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 senza/templates/_helper.py,sha256=RMTXn5P8ZJo41w6PBjzns2W0IuYbVMjetPVq6cNV834,6074 senza/templates/bgapp.py,sha256=30BihLKameyS1EdGIr-ff4xn99qfx7x-SRBYm0FF3m4,2637 senza/templates/postgresapp.py,sha256=l708arexUwcTXhxewu6NNOtffWe0h1jG6DZfDP2rvEo,10680 senza/templates/rediscluster.py,sha256=GcO3vqjIURJGc7lx4Em5m42gOEjgbHVBcZxyhjuXoBw,1755 
senza/templates/redisnode.py,sha256=_Xs6Kju1Coh8qmQVEBmBtiy3eyxsXaYqu-VyFQtXoiM,1574 senza/templates/webapp.py,sha256=wylKXVBEWCW3kw6P1AxFvB5KpMO3hhwqDLAIo5vH9jI,4314 stups_senza-0.89.dist-info/DESCRIPTION.rst,sha256=sr7QYi1p-Rm1ufkroVuqPkm7blJd6_vRWiqLeRcMsfg,3648 stups_senza-0.89.dist-info/METADATA,sha256=EWXaOfSwo1KuDMGCayRfD0c4I9RJZac5_S-mLBtCbLQ,4676 stups_senza-0.89.dist-info/RECORD,, stups_senza-0.89.dist-info/WHEEL,sha256=zX7PHtH_7K-lEzyK75et0UBa3Bj8egCBMXe1M4gc6SU,92 stups_senza-0.89.dist-info/entry_points.txt,sha256=Qz1QYSA74gYETh3hw_Ax7m7uF_p9HxRO3OiomvR8Kwc,42 stups_senza-0.89.dist-info/metadata.json,sha256=pSaUatkrbGKvcHMXRAoEdyESf4NvPH22Qzc3XPWlicA,1332 stups_senza-0.89.dist-info/pbr.json,sha256=thDkcv8KtUcBpnwtDFU1XSxfjGhx94V1Vif8eJkqI-s,47 stups_senza-0.89.dist-info/top_level.txt,sha256=dtHv-7_A_7jp5VM0gkgOnmbyvYCNaDhqT0bEf7bS-7k,6 PKbF\senza/docker.pyPKlGj4senza/utils.pyPK}[G( ´´ ;senza/cli.pyPKxr[G]>>'senza/traffic.pyPKbF9DBB7senza/__main__.pyPKNGVd senza/aws.pyPK~[G9fsenza/__init__.pyPK^LG.senza/components/taupage_auto_scaling_group.pyPK!E5GCŭ6+senza/components/weighted_dns_elastic_load_balancer.pyPK}[Go ,72senza/components/stups_auto_configuration.pyPKNG<՜  !>senza/components/configuration.pyPKXPOGCBubT&T&&ZGsenza/components/auto_scaling_group.pyPK!E5Gs+bbmsenza/components/iam_role.pyPK!E5G)|6!ssenza/components/redis_cluster.pyPK!E5GF ysenza/components/redis_node.pyPKb6GqY)senza/components/elastic_load_balancer.pyPK|W.G~+csenza/components/__init__.pyPKb6G\qy))senza/templates/postgresapp.pyPK!E5G66senza/templates/webapp.pyPKT6G_"senza/templates/_helper.pyPK!E5Gv:senza/templates/rediscluster.pyPK!E5GfM M senza/templates/bgapp.pyPKbFtsenza/templates/__init__.pyPK!E5Gu&&senza/templates/redisnode.pyPK~[G:@@* stups_senza-0.89.dist-info/DESCRIPTION.rstPK~[GX**+stups_senza-0.89.dist-info/entry_points.txtPK~[GB\44(stups_senza-0.89.dist-info/metadata.jsonPKvd.G7//#stups_senza-0.89.dist-info/pbr.jsonPK~[G s(stups_senza-0.89.dist-info/top_level.txtPK~[G}\\ 
>stups_senza-0.89.dist-info/WHEELPK~[GlDD#stups_senza-0.89.dist-info/METADATAPK~[G%k !]/stups_senza-0.89.dist-info/RECORDPK _: