# ===== poetry/__init__.py =====
import os
import sys

_ROOT = os.path.dirname(os.path.realpath(__file__))
_VENDOR = os.path.join(_ROOT, '_vendor')

# Add vendored dependencies to path.
sys.path.insert(0, _VENDOR)

from .__version__ import __version__  # noqa


# ===== poetry/__main__.py =====
import sys

if __name__ == '__main__':
    from .console import main

    sys.exit(main())


# ===== poetry/__version__.py =====
__version__ = '0.8.3'


# ===== poetry/_vendor/.gitignore =====
*
!.gitignore


# ===== poetry/config.py =====
from typing import Any

from .locations import CONFIG_DIR
from .utils._compat import Path
from .utils.toml_file import TomlFile
from .utils.toml_file import TOMLFile


class Config:

    def __init__(self, file):  # type: (TomlFile) -> None
        self._file = file
        if not self._file.exists():
            self._raw_content = {}
            self._content = TOMLFile([])
        else:
            self._raw_content = file.read(raw=True)
            self._content = file.read()

    @property
    def name(self):
        return str(self._file.path)

    @property
    def file(self):
        return self._file

    @property
    def raw_content(self):
        return self._raw_content

    @property
    def content(self):
        return self._content

    def setting(self, setting_name, default=None):  # type: (str) -> Any
        """
        Retrieve a setting value.
        """
        keys = setting_name.split('.')

        config = self._raw_content
        for key in keys:
            if key not in config:
                return default

            config = config[key]

        return config

    def add_property(self, key, value):
        keys = key.split('.')

        config = self._content
        for i, key in enumerate(keys):
            if key not in config and i < len(keys) - 1:
                config[key] = {}

            if i == len(keys) - 1:
                config[key] = value
                break

            config = config[key]

        self.dump()

    def remove_property(self, key):
        keys = key.split('.')

        config = self._content
        for i, key in enumerate(keys):
            if key not in config:
                return

            if i == len(keys) - 1:
                del config[key]
                break

            config = config[key]

        self.dump()

    def dump(self):
        self._file.write(self._content)

    @classmethod
    def create(cls, file, base_dir=None):  # type: (...) -> Config
        if base_dir is None:
            base_dir = CONFIG_DIR

        file = TomlFile(Path(base_dir) / file)

        return cls(file)
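
# ===== editor's note =====
# The dotted-key traversal above is the core of Config.setting; a minimal
# standalone sketch of the same lookup over plain dicts (illustrative only,
# not part of the package):

def setting(raw_content, setting_name, default=None):
    config = raw_content
    for key in setting_name.split('.'):
        if key not in config:
            return default
        config = config[key]
    return config

assert setting({'repositories': {'foo': {'url': 'https://bar.com'}}},
               'repositories.foo.url') == 'https://bar.com'
assert setting({}, 'settings.pypi.fallback', default=True) is True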

# ===== poetry/console/__init__.py =====
from .application import Application


def main():
    return Application().run()


# ===== poetry/console/application.py =====
import os

from cleo import Application as BaseApplication
from cleo.inputs import ArgvInput
from cleo.outputs import ConsoleOutput

from poetry import __version__
from poetry.io.raw_argv_input import RawArgvInput

from .commands import AboutCommand
from .commands import AddCommand
from .commands import BuildCommand
from .commands import CheckCommand
from .commands import ConfigCommand
from .commands import InstallCommand
from .commands import LockCommand
from .commands import NewCommand
from .commands import PublishCommand
from .commands import RemoveCommand
from .commands import RunCommand
from .commands import ScriptCommand
from .commands import SearchCommand
from .commands import ShowCommand
from .commands import UpdateCommand
from .commands import VersionCommand

from .commands.debug import DebugInfoCommand
from .commands.debug import DebugResolveCommand

from .commands.self import SelfUpdateCommand


class Application(BaseApplication):

    def __init__(self):
        super(Application, self).__init__('Poetry', __version__)

        self._poetry = None
        self._skip_io_configuration = False

    @property
    def poetry(self):
        from poetry.poetry import Poetry

        if self._poetry is not None:
            return self._poetry

        self._poetry = Poetry.create(os.getcwd())

        return self._poetry

    def reset_poetry(self):  # type: () -> None
        self._poetry = None

    def run(self, i=None, o=None):  # type: (...) -> int
        if i is None:
            i = ArgvInput()

        if o is None:
            o = ConsoleOutput()

        name = i.get_first_argument()
        if name in ['run', 'script']:
            self._skip_io_configuration = True
            i = RawArgvInput()

        return super(Application, self).run(i, o)

    def do_run(self, i, o):
        name = self.get_command_name(i)
        if name not in ['run', 'script']:
            return super(Application, self).do_run(i, o)

        command = self.find(name)
        self._running_command = command
        status_code = command.run(i, o)
        self._running_command = None

        return status_code

    def configure_io(self, i, o):
        if self._skip_io_configuration:
            return

        super(Application, self).configure_io(i, o)

    def get_default_commands(self):  # type: () -> list
        commands = super(Application, self).get_default_commands()

        commands += [
            AboutCommand(),
            AddCommand(),
            BuildCommand(),
            CheckCommand(),
            ConfigCommand(),
            InstallCommand(),
            LockCommand(),
            NewCommand(),
            PublishCommand(),
            RemoveCommand(),
            RunCommand(),
            ScriptCommand(),
            SearchCommand(),
            ShowCommand(),
            UpdateCommand(),
            VersionCommand(),
        ]

        # Debug commands
        commands += [
            DebugInfoCommand(),
            DebugResolveCommand(),
        ]

        # Self commands
        commands += [
            SelfUpdateCommand(),
        ]

        return commands


# ===== poetry/console/commands/__init__.py =====
from .about import AboutCommand
from .add import AddCommand
from .build import BuildCommand
from .check import CheckCommand
from .config import ConfigCommand
from .install import InstallCommand
from .lock import LockCommand
from .new import NewCommand
from .publish import PublishCommand
from .remove import RemoveCommand
from .run import RunCommand
from .script import ScriptCommand
from .search import SearchCommand
from .show import ShowCommand
from .update import UpdateCommand
from .version import VersionCommand


# ===== poetry/console/commands/about.py =====
from .command import Command


class AboutCommand(Command):
    """
    Short information about Poetry.

    about
    """

    def handle(self):
        self.line("""Poetry - Package Management for Python

Poetry is a dependency manager tracking local dependencies of your projects and libraries.
See https://github.com/sdispater/poetry for more information.
""")
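
# ===== editor's note =====
# Application.run switches to RawArgvInput whenever the first argument is
# 'run' or 'script', so options meant for the child process are not parsed
# by cleo. A standalone sketch of that decision; get_first_argument is
# approximated here by the first non-option token (names are illustrative):

def needs_raw_input(argv):
    first = next((t for t in argv[1:] if not t.startswith('-')), None)
    return first in ('run', 'script')

assert needs_raw_input(['poetry', 'run', 'pytest', '-x'])
assert not needs_raw_input(['poetry', 'install', '--no-dev'])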
""" def handle(self): from poetry.installation import Installer from poetry.semver.version_parser import VersionParser packages = self.argument('name') is_dev = self.option('dev') section = 'dependencies' if is_dev: section = 'dev-dependencies' original_content = self.poetry.file.read() content = self.poetry.file.read() poetry_content = content['tool']['poetry'] for name in packages: for key in poetry_content[section]: if key.lower() == name.lower(): raise ValueError( 'Package {} is already present'.format(name) ) requirements = self._determine_requirements( packages, allow_prereleases=self.option('allow-prereleases') ) requirements = self._format_requirements(requirements) # validate requirements format parser = VersionParser() for constraint in requirements.values(): parser.parse_constraints(constraint) for name, constraint in requirements.items(): if self.option('optional') or self.option('allow-prereleases'): constraint = { 'version': constraint } if self.option('optional'): constraint = { 'optional': True } if self.option('allow-prereleases'): constraint['allows-prereleases'] = True poetry_content[section][name] = constraint # Write new content self.poetry.file.write(content) # Cosmetic new line self.line('') # Update packages self.reset_poetry() installer = Installer( self.output, self.venv, self.poetry.package, self.poetry.locker, self.poetry.pool ) installer.dry_run(self.option('dry-run')) installer.update(True) installer.whitelist(requirements) try: status = installer.run() except Exception: self.poetry.file.write(original_content) raise if status != 0 or self.option('dry-run'): # Revert changes if not self.option('dry-run'): self.error( '\n' 'Addition failed, reverting pyproject.toml ' 'to its original content.' ) self.poetry.file.write(original_content) return status def _determine_requirements(self, requires, # type: List[str] allow_prereleases=False, # type: bool ): # type: (...) -> List[str] if not requires: return [] requires = self._parse_name_version_pairs(requires) result = [] for requirement in requires: if 'version' not in requirement: # determine the best version automatically name, version = self._find_best_version_for_package( requirement['name'], allow_prereleases=allow_prereleases ) requirement['version'] = version requirement['name'] = name self.line( 'Using version {} for {}' .format(version, name) ) else: # check that the specified version/constraint exists # before we proceed name, _ = self._find_best_version_for_package( requirement['name'], requirement['version'], allow_prereleases=allow_prereleases ) requirement['name'] = name result.append( '{} {}'.format(requirement['name'], requirement['version']) ) return result def _find_best_version_for_package(self, name, required_version=None, allow_prereleases=False ): # type: (...) 

# ===== poetry/console/commands/build.py =====
from .venv_command import VenvCommand


class BuildCommand(VenvCommand):
    """
    Builds a package, as a tarball and a wheel by default.

    build
        { --f|format= : Limit the format to either wheel or sdist. }
    """

    def handle(self):
        from poetry.masonry import Builder

        fmt = 'all'
        if self.option('format'):
            fmt = self.option('format')

        package = self.poetry.package
        self.line(
            'Building {} ({})'.format(package.pretty_name, package.version)
        )

        builder = Builder(self.poetry, self.venv, self.output)
        builder.build(fmt)


# ===== poetry/console/commands/check.py =====
from .command import Command


class CheckCommand(Command):
    """
    Checks the validity of the pyproject.toml file.

    check
    """

    def handle(self):
        # Load poetry and display errors, if any
        self.poetry.check(self.poetry.local_config, strict=True)

        self.info('All set!')


# ===== poetry/console/commands/command.py =====
from cleo import Command as BaseCommand

from ..styles.poetry import PoetryStyle


class Command(BaseCommand):

    @property
    def poetry(self):
        return self.get_application().poetry

    def reset_poetry(self):  # type: () -> None
        self.get_application().reset_poetry()

    def run(self, i, o):  # type: () -> int
        """
        Initialize command.
        """
        self.input = i
        self.output = PoetryStyle(i, o)

        return super(BaseCommand, self).run(i, o)

# ===== poetry/console/commands/config.py =====
import json
import re

from .command import Command

TEMPLATE = """[settings]

[repositories]
"""

AUTH_TEMPLATE = """[http-basic]
"""


class ConfigCommand(Command):
    """
    Sets/Gets config options.

    config
        { key : Setting key. }
        { value?* : Setting value. }
        { --list : List configuration settings }
        { --unset : Unset configuration setting }
    """

    help = """This command allows you to edit the poetry config settings and repositories.

To add a repository:

    poetry config repositories.foo https://bar.com/simple/

To remove a repository (repo is a short alias for repositories):

    poetry config --unset repo.foo
"""

    def __init__(self):
        from poetry.config import Config

        super(ConfigCommand, self).__init__()

        self._config = Config.create('config.toml')
        self._auth_config = Config.create('auth.toml')

    def initialize(self, i, o):
        super(ConfigCommand, self).initialize(i, o)

        # Create config file if it does not exist
        if not self._config.file.exists():
            self._config.file.parent.mkdir(parents=True, exist_ok=True)
            with self._config.file.open('w') as f:
                f.write(TEMPLATE)

        if not self._auth_config.file.exists():
            self._auth_config.file.parent.mkdir(parents=True, exist_ok=True)
            with self._auth_config.file.open('w') as f:
                f.write(AUTH_TEMPLATE)

    def handle(self):
        if self.option('list'):
            self._list_configuration(self._config.raw_content)

            return 0

        setting_key = self.argument('key')
        if not setting_key:
            return 0

        if self.argument('value') and self.option('unset'):
            raise RuntimeError('You can not combine a setting value with --unset')

        # show the value if no value is provided
        if not self.argument('value') and not self.option('unset'):
            m = re.match('^repos?(?:itories)?(?:\.(.+))?', self.argument('key'))
            if m:
                if not m.group(1):
                    value = {}
                    if self._config.setting('repositories') is not None:
                        value = self._config.setting('repositories')
                else:
                    repo = self._config.setting(
                        'repositories.{}'.format(m.group(1))
                    )
                    if repo is None:
                        raise ValueError(
                            'There is no {} repository defined'.format(m.group(1))
                        )

                    value = repo

                self.line(str(value))

            return 0

        values = self.argument('value')

        boolean_validator = lambda val: val in {'true', 'false', '1', '0'}
        boolean_normalizer = lambda val: True if val in ['true', '1'] else False

        unique_config_values = {
            'settings.virtualenvs.create': (boolean_validator, boolean_normalizer),
            'settings.pypi.fallback': (boolean_validator, boolean_normalizer),
        }

        if setting_key in unique_config_values:
            if self.option('unset'):
                return self._remove_single_value(setting_key)

            return self._handle_single_value(
                setting_key,
                unique_config_values[setting_key],
                values
            )

        # handle repositories
        m = re.match('^repos?(?:itories)?(?:\.(.+))?', self.argument('key'))
        if m:
            if not m.group(1):
                raise ValueError('You cannot remove the [repositories] section')

            if self.option('unset'):
                repo = self._config.setting(
                    'repositories.{}'.format(m.group(1))
                )
                if repo is None:
                    raise ValueError(
                        'There is no {} repository defined'.format(m.group(1))
                    )

                self._config.remove_property(
                    'repositories.{}'.format(m.group(1))
                )

                return 0

            if len(values) == 1:
                url = values[0]

                self._config.add_property(
                    'repositories.{}.url'.format(m.group(1)), url
                )

                return 0

            raise ValueError(
                'You must pass the url. '
                'Example: poetry config repositories.foo https://bar.com'
            )

        # handle auth
        m = re.match('^(http-basic)\.(.+)', self.argument('key'))
        if m:
            if self.option('unset'):
                if not self._auth_config.setting('{}.{}'.format(m.group(1), m.group(2))):
                    raise ValueError(
                        'There is no {} {} defined'.format(m.group(2), m.group(1))
                    )

                self._auth_config.remove_property(
                    '{}.{}'.format(m.group(1), m.group(2))
                )

                return 0

            if m.group(1) == 'http-basic':
                if len(values) == 1:
                    username = values[0]
                    # Only username, so we prompt for password
                    password = self.secret('Password:')
                elif len(values) != 2:
                    raise ValueError(
                        'Expected one or two arguments '
                        '(username, password), got {}'.format(len(values))
                    )
                else:
                    username = values[0]
                    password = values[1]

                self._auth_config.add_property(
                    '{}.{}'.format(m.group(1), m.group(2)),
                    {
                        'username': username,
                        'password': password
                    }
                )

            return 0

        raise ValueError(
            'Setting {} does not exist'.format(self.argument('key'))
        )

    def _handle_single_value(self, key, callbacks, values):
        validator, normalizer = callbacks

        if len(values) > 1:
            raise RuntimeError('You can only pass one value.')

        value = values[0]
        if not validator(value):
            raise RuntimeError(
                '"{}" is an invalid value for {}'.format(value, key)
            )

        self._config.add_property(key, normalizer(value))

        return 0

    def _remove_single_value(self, key):
        self._config.remove_property(key)

        return 0

    def _list_configuration(self, contents, k=None):
        orig_k = k

        for key, value in contents.items():
            if k is None and key not in ['config', 'repositories', 'settings']:
                continue

            if isinstance(value, dict) or key == 'repositories' and k is None:
                if k is None:
                    k = ''

                k += re.sub('^config\.', '', key + '.')
                self._list_configuration(value, k=k)
                k = orig_k

                continue

            if isinstance(value, list):
                value = [
                    json.dumps(val) if isinstance(val, list) else val
                    for val in value
                ]

                value = '[{}]'.format(', '.join(value))

            value = json.dumps(value)

            self.line('[{}] {}'.format((k or '') + key, value))
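
# ===== editor's note =====
# The repos?(?:itories)? pattern is what makes 'repo' a short alias for
# 'repositories' in config keys; illustrative matches:

import re

pattern = r'^repos?(?:itories)?(?:\.(.+))?'
assert re.match(pattern, 'repositories.foo').group(1) == 'foo'
assert re.match(pattern, 'repo.foo').group(1) == 'foo'
assert re.match(pattern, 'repositories').group(1) is None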

# ===== poetry/console/commands/debug/__init__.py =====
from .info import DebugInfoCommand
from .resolve import DebugResolveCommand


# ===== poetry/console/commands/debug/info.py =====
import os
import sys

from ..venv_command import VenvCommand


class DebugInfoCommand(VenvCommand):
    """
    Shows debug information.

    debug:info
    """

    def handle(self):
        poetry = self.poetry
        package = poetry.package
        venv = self.venv

        poetry_python_version = '.'.join(str(s) for s in sys.version_info[:3])

        self.output.title('Poetry')
        self.output.listing([
            'Version: {}'.format(poetry.VERSION),
            'Python: {}'.format(poetry_python_version)
        ])
        self.line('')

        venv_python_version = '.'.join(str(s) for s in venv.version_info[:3])
        self.output.title('Virtualenv')
        self.output.listing([
            'Python: {}'.format(venv_python_version),
            'Implementation: {}'.format(venv.python_implementation),
            'Path: {}'.format(venv.venv if venv.is_venv() else 'NA')
        ])
        self.line('')

        self.output.title('System')
        self.output.listing([
            'Platform: {}'.format(sys.platform),
            'OS: {}'.format(os.name),
        ])
        self.line('')
} """ def handle(self): from poetry.packages import Dependency from poetry.puzzle import Solver from poetry.repositories.repository import Repository from poetry.semver.version_parser import VersionParser packages = self.argument('package') if not packages: package = self.poetry.package dependencies = package.requires + package.dev_requires else: requirements = self._determine_requirements(packages) requirements = self._format_requirements(requirements) # validate requirements format parser = VersionParser() for constraint in requirements.values(): parser.parse_constraints(constraint) dependencies = [] for name, constraint in requirements.items(): dependencies.append( Dependency(name, constraint) ) solver = Solver( self.poetry.package, self.poetry.pool, Repository(), Repository(), self.output ) ops = solver.solve(dependencies) self.line('') self.line('Resolution results:') self.line('') for op in ops: package = op.package self.line( ' - {} ({})' .format( package.name, package.version ) ) def _determine_requirements(self, requires): # type: (List[str]) -> List[str] if not requires: return [] requires = self._parse_name_version_pairs(requires) result = [] for requirement in requires: if 'version' not in requirement: requirement['version'] = '*' result.append( '{} {}'.format(requirement['name'], requirement['version']) ) return result def _parse_name_version_pairs(self, pairs): # type: (list) -> list result = [] for i in range(len(pairs)): pair = re.sub('^([^=: ]+)[=: ](.*)$', '\\1 \\2', pairs[i].strip()) pair = pair.strip() if ' ' in pair: name, version = pair.split(' ', 2) result.append({ 'name': name, 'version': version }) else: result.append({ 'name': pair }) return result def _format_requirements(self, requirements): # type: (List[str]) -> dict requires = {} requirements = self._parse_name_version_pairs(requirements) for requirement in requirements: requires[requirement['name']] = requirement['version'] return requires PK!"poetry/console/commands/install.pyfrom .venv_command import VenvCommand class InstallCommand(VenvCommand): """ Installs the project dependencies. install { --no-dev : Do not install dev dependencies. } { --dry-run : Outputs the operations but will not execute anything (implicitly enables --verbose). } { --E|extras=* : Extra sets of dependencies to install (multiple values allowed). } """ help = """The install command reads the pyproject.toml file from the current directory, processes it, and downloads and installs all the libraries and dependencies outlined in that file. If the file does not exist it will look for pyproject.toml and do the same. poetry install """ def handle(self): from poetry.installation import Installer installer = Installer( self.output, self.venv, self.poetry.package, self.poetry.locker, self.poetry.pool ) installer.extras(self.option('extras')) installer.dev_mode(not self.option('no-dev')) installer.dry_run(self.option('dry-run')) installer.verbose(self.option('verbose')) return installer.run() PK!@poetry/console/commands/lock.pyfrom .venv_command import VenvCommand class LockCommand(VenvCommand): """ Locks the project dependencies. lock """ help = """The lock command reads the pyproject.toml file from the current directory, processes it, and locks the depdencies in the pyproject.lock file. 

# ===== poetry/console/commands/lock.py =====
from .venv_command import VenvCommand


class LockCommand(VenvCommand):
    """
    Locks the project dependencies.

    lock
    """

    help = """The lock command reads the pyproject.toml file from the
current directory, processes it, and locks the dependencies in the
pyproject.lock file.

poetry lock
"""

    def handle(self):
        from poetry.installation import Installer

        installer = Installer(
            self.output,
            self.venv,
            self.poetry.package,
            self.poetry.locker,
            self.poetry.pool
        )

        installer.update(True)
        installer.execute_operations(False)

        return installer.run()


# ===== poetry/console/commands/new.py =====
from poetry.utils._compat import Path

from .command import Command


class NewCommand(Command):
    """
    Creates a new Python project at <path>.

    new
        { path : The path to create the project at. }
        { --name : Set the resulting package name. }
    """

    def handle(self):
        from poetry.layouts import layout

        layout_ = layout('standard')

        path = Path.cwd() / Path(self.argument('path'))
        name = self.option('name')
        if not name:
            name = path.name

        if path.exists():
            if list(path.glob('*')):
                # Directory is not empty. Aborting.
                raise RuntimeError(
                    'Destination {} exists and is not empty'.format(path)
                )

        readme_format = 'rst'

        layout_ = layout_(name, '0.1.0', readme_format=readme_format)
        layout_.create(path)

        self.line(
            'Created package {} in {}'.format(
                name, path.relative_to(Path.cwd())
            )
        )


# ===== poetry/console/commands/publish.py =====
from .command import Command


class PublishCommand(Command):
    """
    Publishes a package to a remote repository.

    publish
        { --r|repository= : The repository to publish the package to. }
        { --no-build : Do not build the package before publishing. }
    """

    help = """The publish command builds and uploads the package to a remote repository.

By default, it will upload to PyPI but if you pass the --repository option it will
upload to it instead.

The --repository option should match the name of a configured repository using
the config command.
"""

    def handle(self):
        from poetry.masonry.publishing.publisher import Publisher

        # Building package first, unless told otherwise
        if not self.option('no-build'):
            self.call('build')
            self.line('')

        publisher = Publisher(self.poetry, self.output)
        publisher.publish(self.option('repository'))
} """ help = """The remove command removes a package from the current list of installed packages poetry remove""" def handle(self): from poetry.installation import Installer packages = self.argument('packages') is_dev = self.option('dev') original_content = self.poetry.file.read() content = self.poetry.file.read() poetry_content = content['tool']['poetry'] section = 'dependencies' if is_dev: section = 'dev-dependencies' # Deleting entries requirements = {} for name in packages: found = False for key in poetry_content[section]: if key.lower() == name.lower(): found = True requirements[name] = poetry_content[section][name] break if not found: raise ValueError('Package {} not found'.format(name)) for key in requirements: del poetry_content[section][key] # Write the new content back self.poetry.file.write(content) # Update packages self.reset_poetry() installer = Installer( self.output, self.venv, self.poetry.package, self.poetry.locker, self.poetry.pool ) installer.dry_run(self.option('dry-run')) installer.update(True) installer.whitelist(requirements) try: status = installer.run() except Exception: self.poetry.file.write(original_content) raise if status != 0 or self.option('dry-run'): # Revert changes if not self.option('dry-run'): self.error( '\n' 'Removal failed, reverting pyproject.toml ' 'to its original content.' ) self.poetry.file.write(original_content) return status PK!X@poetry/console/commands/run.pyfrom .venv_command import VenvCommand class RunCommand(VenvCommand): """ Runs a command in the appropriate environment. run { args* : The command and arguments/options to run. } """ def handle(self): args = self.argument('args') venv = self.venv return venv.execute(*args) def merge_application_definition(self, merge_args=True): if self._application is None \ or (self._application_definition_merged and (self._application_definition_merged_with_args or not merge_args)): return if merge_args: current_arguments = self._definition.get_arguments() self._definition.set_arguments(self._application.get_definition().get_arguments()) self._definition.add_arguments(current_arguments) self._application_definition_merged = True if merge_args: self._application_definition_merged_with_args = True PK!l.!poetry/console/commands/script.pyimport sys from .venv_command import VenvCommand class ScriptCommand(VenvCommand): """ Executes a script defined in pyproject.toml script { script-name : The name of the script to execute } { args?* : The command and arguments/options to pass to the script. 
} """ def handle(self): script = self.argument('script-name') argv = [script] + self.argument('args') scripts = self.poetry.local_config.get('scripts') if not scripts: raise RuntimeError('No scripts defined in pyproject.toml') if script not in scripts: raise ValueError('Script {} is not defined'.format(script)) module, callable_ = scripts[script].split(':') cmd = ['python', '-c'] cmd += [ '"import sys; ' 'from importlib import import_module; ' 'sys.argv = {!r}; ' 'import_module(\'{}\').{}()"'.format( argv, module, callable_ ) ] self.venv.run(*cmd, shell=True, call=True) def merge_application_definition(self, merge_args=True): if self._application is None \ or (self._application_definition_merged and (self._application_definition_merged_with_args or not merge_args)): return if merge_args: current_arguments = self._definition.get_arguments() self._definition.set_arguments(self._application.get_definition().get_arguments()) self._definition.add_arguments(current_arguments) self._application_definition_merged = True if merge_args: self._application_definition_merged_with_args = True PK!\J!poetry/console/commands/search.pyfrom .command import Command class SearchCommand(Command): """ Searches for packages on remote repositories. search { tokens* : The tokens to search for. } { --N|only-name : Search only in name. } """ def handle(self): from poetry.repositories.pypi_repository import PyPiRepository flags = PyPiRepository.SEARCH_FULLTEXT if self.option('only-name'): flags = PyPiRepository.SEARCH_FULLTEXT results = PyPiRepository().search(self.argument('tokens'), flags) for result in results: self.line('') name = '{}'.format( result.name ) name += ' ({})'.format(result.version) self.line(name) if result.description: self.line( ' {}'.format(result.description) ) PK!Z&&(poetry/console/commands/self/__init__.pyfrom .update import SelfUpdateCommand PK!gq&poetry/console/commands/self/update.pyimport os import shutil import subprocess import sys from email.parser import Parser from functools import cmp_to_key from ..command import Command class SelfUpdateCommand(Command): """ Updates poetry to the latest version. self:update { version? : The version to update to. } { --preview : Install prereleases. 
} """ def handle(self): from poetry.__version__ import __version__ from poetry.repositories.pypi_repository import PyPiRepository from poetry.semver.comparison import less_than version = self.argument('version') if not version: version = '>=' + __version__ repo = PyPiRepository(fallback=False) packages = repo.find_packages('poetry', version) if not packages: self.line('No release found for the specified version') return packages.sort( key=cmp_to_key( lambda x, y: 0 if x.version == y.version else -1 * int(less_than(x.version, y.version) or -1) ) ) release = None for package in reversed(packages): if package.is_prerelease(): if self.option('preview'): release = package break continue release = package break if release is None: self.line('No new release found') return if release.version == __version__: self.line('You are using the latest version') return try: self.update(release) except subprocess.CalledProcessError as e: self.line('') self.output.block([ '[CalledProcessError]', 'An error has occured: {}'.format(str(e)), e.output ], style='error') return e.returncode def update(self, release): from poetry.utils._compat import Path from poetry.utils.helpers import temporary_directory version = release.version self.line('Updating to {}'.format(version)) prefix = sys.prefix base_prefix = getattr(sys, 'base_prefix', None) real_prefix = getattr(sys, 'real_prefix', None) prefix_poetry = Path(prefix) / 'bin' / 'poetry' if prefix_poetry.exists(): pip = (prefix_poetry.parent / 'pip').resolve() elif ( base_prefix and base_prefix != prefix and (Path(base_prefix) / 'bin' / 'poetry').exists() ): pip = Path(base_prefix) / 'bin' / 'pip' elif real_prefix: pip = Path(real_prefix) / 'bin' / 'pip' else: raise RuntimeError('Unable to determine poetry\'s path') with temporary_directory(prefix='poetry-update-') as temp_dir: temp_dir = Path(temp_dir) dist = temp_dir / 'dist' self.line(' - Getting dependencies') self.process( str(pip), 'install', '-U', 'poetry=={}'.format(release.version), '--target', str(dist) ) self.line(' - Vendorizing dependencies') poetry_dir = dist / 'poetry' vendor_dir = poetry_dir / '_vendor' # Everything, except poetry itself, should # be put in the _vendor directory for file in dist.glob('*'): if file.name.startswith('poetry'): continue dest = vendor_dir / file.name if file.is_dir(): shutil.copytree(str(file), str(dest)) shutil.rmtree(str(file)) else: shutil.copy(str(file), str(dest)) os.unlink(str(file)) wheel_data = dist / 'poetry-{}.dist-info'.format(version) / 'WHEEL' with wheel_data.open() as f: wheel_data = Parser().parsestr(f.read()) tag = wheel_data['Tag'] # Repack everything and install self.line(' - Updating poetry') shutil.make_archive( str(temp_dir / 'poetry-{}-{}'.format(version, tag)), format='zip', root_dir=str(dist) ) os.rename( str(temp_dir / 'poetry-{}-{}.zip'.format(version, tag)), str(temp_dir / 'poetry-{}-{}.whl'.format(version, tag)), ) self.process( str(pip), 'install', '--upgrade', '--no-deps', str(temp_dir / 'poetry-{}-{}.whl'.format(version, tag)) ) self.line('') self.line( 'poetry ({}) ' 'successfully installed!'.format( version ) ) def process(self, *args): return subprocess.check_output(list(args), stderr=subprocess.STDOUT) PK!k_##poetry/console/commands/show.py# -*- coding: utf-8 -*- from .venv_command import VenvCommand class ShowCommand(VenvCommand): """ Shows information about packages. show { package? : Package to inspect. } { --t|tree : List the dependencies as a tree. } { --l|latest : Show the latest version. 

# ===== poetry/console/commands/show.py =====
# -*- coding: utf-8 -*-
from .venv_command import VenvCommand


class ShowCommand(VenvCommand):
    """
    Shows information about packages.

    show
        { package? : Package to inspect. }
        { --t|tree : List the dependencies as a tree. }
        { --l|latest : Show the latest version. }
        { --o|outdated : Show the latest version but only for packages that are outdated. }
    """

    help = """The show command displays detailed information about a package, or
lists all packages available."""

    colors = [
        'green',
        'yellow',
        'cyan',
        'magenta',
        'blue',
    ]

    def handle(self):
        package = self.argument('package')

        if self.option('tree'):
            self.init_styles()

        if self.option('outdated'):
            self.input.set_option('latest', True)

        installed_repo = self.poetry.locker.locked_repository(True)

        # Show tree view if requested
        if self.option('tree') and not package:
            requires = self.poetry.package.requires + self.poetry.package.dev_requires
            packages = installed_repo.packages
            for package in packages:
                for require in requires:
                    if package.name == require.name:
                        self.display_package_tree(package, installed_repo)

                        break

            return 0

        table = self.table(style='compact')
        table.get_style().set_vertical_border_char('')
        locked_packages = installed_repo.packages

        if package:
            pkg = None
            for locked in locked_packages:
                if package.lower() == locked.name:
                    pkg = locked

                    break

            if not pkg:
                raise ValueError('Package {} not found'.format(package))

            if self.option('tree'):
                self.display_package_tree(pkg, installed_repo)

                return 0

            rows = [
                ['name', ' : {}'.format(pkg.pretty_name)],
                ['version', ' : {}'.format(pkg.pretty_version)],
                ['description', ' : {}'.format(pkg.description)],
            ]

            table.add_rows(rows)
            table.render()

            if pkg.requires:
                self.line('')
                self.line('dependencies')
                for dependency in pkg.requires:
                    self.line(
                        ' - {} {}'.format(
                            dependency.pretty_name,
                            dependency.pretty_constraint
                        )
                    )

            return 0

        show_latest = self.option('latest')
        terminal = self.get_application().terminal
        width = terminal.width
        name_length = version_length = latest_length = 0
        latest_packages = {}

        # Computing widths
        for locked in locked_packages:
            name_length = max(name_length, len(locked.pretty_name))
            version_length = max(version_length, len(locked.full_pretty_version))

            if show_latest:
                latest = self.find_latest_package(locked)
                if not latest:
                    latest = locked

                latest_packages[locked.pretty_name] = latest
                latest_length = max(latest_length, len(latest.full_pretty_version))

        write_version = name_length + version_length + 3 <= width
        write_latest = name_length + version_length + latest_length + 3 <= width
        write_description = name_length + version_length + latest_length + 24 <= width

        for locked in locked_packages:
            line = '{:{}}'.format(locked.pretty_name, name_length)
            if write_version:
                line += ' {:{}}'.format(locked.full_pretty_version, version_length)

            if show_latest and write_latest:
                latest = latest_packages[locked.pretty_name]

                update_status = self.get_update_status(latest, locked)
                color = 'green'
                if update_status == 'semver-safe-update':
                    color = 'red'
                elif update_status == 'update-possible':
                    color = 'yellow'

                # editor's note: the color markup was stripped from the dump;
                # the <fg=...> tag restores the third format argument's slot.
                line += ' <fg={}>{:{}}</>'.format(
                    color, latest.pretty_version, latest_length
                )

                if self.option('outdated') and update_status == 'up-to-date':
                    continue

            if write_description:
                description = locked.description
                remaining = width - name_length - version_length - 4
                if show_latest:
                    remaining -= latest_length

                if len(locked.description) > remaining:
                    description = description[:remaining - 3] + '...'
                line += ' ' + description

            self.line(line)

    def display_package_tree(self, package, installed_repo):
        self.write('{}'.format(package.pretty_name))
        self.line(' {} {}'.format(package.pretty_version, package.description))

        dependencies = package.requires
        dependencies = sorted(dependencies, key=lambda x: x.name)
        tree_bar = '├'
        j = 0
        total = len(dependencies)
        for dependency in dependencies:
            j += 1
            if j == total:
                tree_bar = '└'

            level = 1
            color = self.colors[level]
            info = '{tree_bar}── <{color}>{name}</> {constraint}'.format(
                tree_bar=tree_bar,
                color=color,
                name=dependency.name,
                constraint=dependency.pretty_constraint
            )
            self._write_tree_line(info)

            tree_bar = tree_bar.replace('└', ' ')
            packages_in_tree = [package.name, dependency.name]

            self._display_tree(
                dependency, installed_repo, packages_in_tree,
                tree_bar, level + 1
            )

    def _display_tree(self, dependency, installed_repo, packages_in_tree,
                      previous_tree_bar='├', level=1):
        previous_tree_bar = previous_tree_bar.replace('├', '│')

        dependencies = []
        for package in installed_repo.packages:
            if package.name == dependency.name:
                dependencies = package.requires

                break

        dependencies = sorted(dependencies, key=lambda x: x.name)
        tree_bar = previous_tree_bar + '   ├'
        i = 0
        total = len(dependencies)
        for dependency in dependencies:
            i += 1
            current_tree = packages_in_tree

            if i == total:
                tree_bar = previous_tree_bar + '   └'

            color_ident = level % len(self.colors)
            color = self.colors[color_ident]

            circular_warn = ''
            if dependency.name in current_tree:
                circular_warn = '(circular dependency aborted here)'

            info = '{tree_bar}── <{color}>{name}</> {constraint} {warn}'.format(
                tree_bar=tree_bar,
                color=color,
                name=dependency.name,
                constraint=dependency.pretty_constraint,
                warn=circular_warn
            )
            self._write_tree_line(info)

            tree_bar = tree_bar.replace('└', ' ')

            if dependency.name not in current_tree:
                current_tree.append(dependency.name)

                self._display_tree(
                    dependency, installed_repo, current_tree,
                    tree_bar, level + 1
                )

    def _write_tree_line(self, line):
        if not self.output.is_decorated():
            line = line.replace('└', '`-')
            line = line.replace('├', '|-')
            line = line.replace('──', '-')
            line = line.replace('│', '|')

        self.line(line)

    def init_styles(self):
        for color in self.colors:
            self.set_style(color, color)

    def find_latest_package(self, package):
        from poetry.version.version_selector import VersionSelector

        # find the latest version allowed in this pool
        if package.source_type == 'git':
            return

        name = package.name
        selector = VersionSelector(self.poetry.pool)

        return selector.find_best_candidate(
            name, '>={}'.format(package.pretty_version)
        )

    def get_update_status(self, latest, package):
        # note: 'statisfies' is the identifier actually exported by
        # poetry.semver in this release
        from poetry.semver import statisfies

        if latest.full_pretty_version == package.full_pretty_version:
            return 'up-to-date'

        constraint = '^' + package.pretty_version
        if latest.version and statisfies(latest.version, constraint):
            # It needs an immediate semver-compliant upgrade
            return 'semver-safe-update'

        # it needs an upgrade but has potential BC breaks so is not urgent
        return 'update-possible'
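
# ===== editor's note =====
# get_update_status classifies an available version against a caret
# constraint on the installed one. A rough standalone sketch for plain
# x.y.z versions; poetry's statisfies handles full semver, this toy
# version only compares majors:

def get_update_status(latest, installed):
    if latest == installed:
        return 'up-to-date'
    # caret rule for versions >= 1.0.0: same major means semver-compatible
    if latest.split('.')[0] == installed.split('.')[0]:
        return 'semver-safe-update'
    return 'update-possible'

assert get_update_status('2.19.1', '2.18.0') == 'semver-safe-update'
assert get_update_status('3.0.0', '2.18.0') == 'update-possible'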
} """ def handle(self): from poetry.installation import Installer packages = self.argument('packages') installer = Installer( self.output, self.venv, self.poetry.package, self.poetry.locker, self.poetry.pool ) if packages: installer.whitelist({name: '*' for name in packages}) installer.dev_mode(not self.option('no-dev')) installer.dry_run(self.option('dry-run')) # Force update installer.update(True) return installer.run() PK!Y?MJJ'poetry/console/commands/venv_command.pyfrom .command import Command class VenvCommand(Command): def __init__(self, name=None): self._venv = None super(VenvCommand, self).__init__(name) def initialize(self, i, o): from poetry.utils.venv import Venv super(VenvCommand, self).initialize(i, o) self._venv = Venv.create(o, self.poetry.package.name) if self._venv.is_venv() and o.is_verbose(): o.writeln( 'Using virtualenv: {}'.format(self._venv.venv) ) @property def venv(self): return self._venv PK!n=="poetry/console/commands/version.pyimport re from .command import Command class VersionCommand(Command): """ Bumps the version of the project. version { version=patch } """ help = """\ The version command bumps the version of the project and writes the new version back to pyproject.toml. The new version should ideally be a valid semver string or a valid bump rule: patch, minor, major, prepatch, preminor, premajor, prerelease. """ RESERVED = { 'major', 'minor', 'patch', 'premajor', 'preminor', 'prepatch', 'prerelease' } def handle(self): version = self.argument('version') if version in self.RESERVED: version = self.increment_version( self.poetry.package.pretty_version, version ) self.line( 'Bumping version from {} to {}'.format( self.poetry.package.pretty_version, version ) ) content = self.poetry.file.read() poetry_content = content['tool']['poetry'] poetry_content['version'] = version self.poetry.file.write(content) def increment_version(self, version, rule): from poetry.semver.version_parser import VersionParser parser = VersionParser() version_regex = ( 'v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.(\d+))?{}(?:\+[^\s]+)?' 
        m = re.match(version_regex, version)
        if not m:
            raise ValueError(
                'The project\'s version doesn\'t seem to follow semver'
            )

        if m.group(3):
            index = 2
        elif m.group(2):
            index = 1
        else:
            index = 0

        matches = m.groups()[:index + 1]
        base = '.'.join(matches)
        extra_matches = list(g or '' for g in m.groups()[4:])
        extras = version[len('.'.join(matches)):]
        increment = 1
        is_prerelease = (extra_matches[0] or extra_matches[1]) != ''
        bump_prerelease = rule in {
            'premajor', 'preminor', 'prepatch', 'prerelease'
        }

        position = -1
        if rule in {'major', 'premajor'}:
            if m.group(1) != '0' or m.group(2) != '0' or not is_prerelease:
                position = 0
        elif rule in {'minor', 'preminor'}:
            if m.group(2) != '0' or not is_prerelease:
                position = 1
        elif rule in {'patch', 'prepatch'}:
            if not is_prerelease:
                position = 2
        elif rule == 'prerelease' and not is_prerelease:
            position = 2

        if position != -1:
            extra_matches[0] = None
            base = parser._manipulate_version_string(
                matches, position, increment=increment
            )

        if bump_prerelease:
            # We bump the prerelease part of the version
            sep = ''
            if not extra_matches[0]:
                extra_matches[0] = 'a'
                extra_matches[1] = '0'
                sep = ''
            else:
                if extras.startswith(('.', '_', '-')):
                    sep = extras[0]

                prerelease = extra_matches[1]
                if not prerelease:
                    prerelease = '.1'

                psep = ''
                if prerelease.startswith(('.', '-')):
                    psep = prerelease[0]
                    prerelease = prerelease[1:]

                new_prerelease = str(int(prerelease) + 1)
                extra_matches[1] = '{}{}'.format(psep, new_prerelease)

            extras = '{}{}{}{}'.format(
                sep, extra_matches[0], extra_matches[1], extra_matches[2]
            )
        else:
            extras = ''

        return '.'.join(base.split('.')[:max(index, position) + 1]) + extras
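
# ===== editor's note =====
# The version-matching part of increment_version (minus poetry's private
# modifier suffix) captures up to four numeric components; the bump rules
# then rewrite one position and zero out the rest. A small grounded check:

import re

version_regex = r'v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.(\d+))?'
m = re.match(version_regex, '1.2.3')
assert m.groups()[:3] == ('1', '2', '3')

# expected behavior per the help text, e.g.
# patch: 1.2.3 -> 1.2.4, minor: 1.2.3 -> 1.3.0, major: 1.2.3 -> 2.0.0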

# ===== poetry/console/styles/__init__.py =====
# (empty)


# ===== poetry/console/styles/poetry.py =====
from cleo.styles import CleoStyle
from cleo.styles import OutputStyle


class PoetryStyle(CleoStyle):

    def __init__(self, i, o):
        super(PoetryStyle, self).__init__(i, o)

        self.output.get_formatter().add_style('warning', 'black', 'yellow')
        self.output.get_formatter().add_style('question', 'blue')

    def writeln(self, messages,
                type=OutputStyle.OUTPUT_NORMAL,
                verbosity=OutputStyle.VERBOSITY_NORMAL):
        if self.output.verbosity >= verbosity:
            super(PoetryStyle, self).writeln(messages, type=type)

    def write(self, messages, newline=False,
              type=OutputStyle.OUTPUT_NORMAL,
              verbosity=OutputStyle.VERBOSITY_NORMAL):
        if self.output.verbosity >= verbosity:
            super(PoetryStyle, self).write(messages, newline=newline, type=type)


# ===== poetry/exceptions.py =====
class PoetryException(Exception):

    pass


class InvalidProjectFile(PoetryException):

    pass


# ===== poetry/installation/__init__.py =====
from .installer import Installer


# ===== poetry/installation/base_installer.py =====
class BaseInstaller:

    def install(self, package):
        raise NotImplementedError

    def update(self, source, target):
        raise NotImplementedError

    def remove(self, package):
        raise NotImplementedError


# ===== poetry/installation/installer.py =====
import sys

from typing import List
from typing import Union

from poetry.packages import Dependency
from poetry.packages import Locker
from poetry.packages import Package
from poetry.packages.constraints.generic_constraint import GenericConstraint
from poetry.puzzle import Solver
from poetry.puzzle.operations import Install
from poetry.puzzle.operations import Uninstall
from poetry.puzzle.operations import Update
from poetry.puzzle.operations.operation import Operation
from poetry.repositories import Pool
from poetry.repositories import Repository
from poetry.repositories.installed_repository import InstalledRepository
from poetry.semver.constraints import Constraint
from poetry.semver.version_parser import VersionParser

from .base_installer import BaseInstaller
from .pip_installer import PipInstaller


class Installer:

    def __init__(self, io, venv,
                 package,  # type: Package
                 locker,   # type: Locker
                 pool,     # type: Pool
                 installed=None  # type: (Union[InstalledRepository, None])
                 ):
        self._io = io
        self._venv = venv
        self._package = package
        self._locker = locker
        self._pool = pool

        self._dry_run = False
        self._update = False
        self._verbose = False
        self._write_lock = True
        self._dev_mode = True
        self._execute_operations = True

        self._whitelist = {}
        self._extras = []

        self._installer = self._get_installer()

        if installed is None:
            installed = self._get_installed()

        self._installed_repository = installed

    @property
    def installer(self):
        return self._installer

    def run(self):
        # Force update if there is no lock file present
        if not self._update and not self._locker.is_locked():
            self._update = True

        if self.is_dry_run():
            self.verbose(True)
            self._write_lock = False
            self._execute_operations = False

        local_repo = Repository()
        self._do_install(local_repo)

        return 0

    def dry_run(self, dry_run=True):  # type: (bool) -> Installer
        self._dry_run = dry_run

        return self

    def is_dry_run(self):  # type: () -> bool
        return self._dry_run

    def verbose(self, verbose=True):  # type: (bool) -> Installer
        self._verbose = verbose

        return self

    def is_verbose(self):  # type: () -> bool
        return self._verbose

    def dev_mode(self, dev_mode=True):  # type: (bool) -> Installer
        self._dev_mode = dev_mode

        return self

    def is_dev_mode(self):  # type: () -> bool
        return self._dev_mode

    def update(self, update=True):  # type: (bool) -> Installer
        self._update = update

        return self

    def is_updating(self):  # type: () -> bool
        return self._update

    def execute_operations(self, execute=True):  # type: (bool) -> Installer
        self._execute_operations = execute

        return self

    def whitelist(self, packages):  # type: (dict) -> Installer
        self._whitelist = packages

        return self

    def extras(self, extras):  # type: (list) -> Installer
        self._extras = extras

        return self

    def _do_install(self, local_repo):
        locked_repository = Repository()
        if self._update:
            if self._locker.is_locked():
                locked_repository = self._locker.locked_repository(True)

            # Checking extras
            for extra in self._extras:
                if extra not in self._package.extras:
                    raise ValueError(
                        'Extra [{}] is not specified.'.format(extra)
                    )

            self._io.writeln('Updating dependencies')

            fixed = []

            # If the whitelist is enabled, packages not in it are fixed
            # to the version specified in the lock
            if self._whitelist:
                # collect packages to fixate from root requirements
                candidates = []
                for package in locked_repository.packages:
                    candidates.append(package)

                # fix them to the version in lock if they are not updateable
                for candidate in candidates:
                    to_fix = True
                    for require in self._whitelist.keys():
                        if require == candidate.name:
                            to_fix = False

                    if to_fix:
                        fixed.append(
                            Dependency(candidate.name, candidate.version)
                        )

            solver = Solver(
                self._package,
                self._pool,
                self._installed_repository,
                locked_repository,
                self._io
            )

            request = self._package.requires
            request += self._package.dev_requires

            ops = solver.solve(request, fixed=fixed)
        else:
            self._io.writeln('Installing dependencies from lock file')

            locked_repository = self._locker.locked_repository(True)

            if not self._locker.is_fresh():
                self._io.writeln(
                    'Warning: The lock file is not up to date with '
                    'the latest changes in pyproject.toml. '
                    'You may be getting outdated dependencies. '
                    'Run update to update them.'
                )
            for extra in self._extras:
                if extra not in self._locker.lock_data.get('extras', {}):
                    raise ValueError(
                        'Extra [{}] is not specified.'.format(extra)
                    )

            # If we are installing from lock
            # Filter the operations by comparing it with what is
            # currently installed
            ops = self._get_operations_from_lock(locked_repository)

        self._populate_local_repo(local_repo, ops, locked_repository)

        # We need to filter operations so that packages
        # not compatible with the current system,
        # or optional and not requested, are dropped
        self._filter_operations(ops, local_repo)

        self._io.new_line()

        # Execute operations
        actual_ops = [op for op in ops if not op.skipped]
        if not actual_ops and (self._execute_operations or self._dry_run):
            self._io.writeln('Nothing to install or update')

        if actual_ops and (self._execute_operations or self._dry_run):
            installs = []
            updates = []
            uninstalls = []
            skipped = []
            for op in ops:
                if op.skipped:
                    skipped.append(op)

                    continue

                if op.job_type == 'install':
                    installs.append(
                        '{}:{}'.format(
                            op.package.pretty_name,
                            op.package.full_pretty_version
                        )
                    )
                elif op.job_type == 'update':
                    updates.append(
                        '{}:{}'.format(
                            op.target_package.pretty_name,
                            op.target_package.full_pretty_version
                        )
                    )
                elif op.job_type == 'uninstall':
                    uninstalls.append(op.package.pretty_name)

            self._io.new_line()
            self._io.writeln(
                'Package operations: '
                '{} install{}, '
                '{} update{}, '
                '{} removal{}'
                '{}'.format(
                    len(installs), '' if len(installs) == 1 else 's',
                    len(updates), '' if len(updates) == 1 else 's',
                    len(uninstalls), '' if len(uninstalls) == 1 else 's',
                    ', {} skipped'.format(len(skipped))
                    if skipped and self.is_verbose() else ''
                )
            )

        # Writing lock before installing
        if self._update and self._write_lock:
            updated_lock = self._locker.set_lock_data(
                self._package,
                local_repo.packages
            )

            if updated_lock:
                self._io.writeln('')
                self._io.writeln('Writing lock file')
                self._io.writeln('')

        for op in ops:
            self._execute(op)

    def _execute(self, operation):  # type: (Operation) -> None
        """
        Execute a given operation.
        """
""" method = operation.job_type getattr(self, '_execute_{}'.format(method))(operation) def _execute_install(self, operation): # type: (Install) -> None if operation.skipped: if self.is_verbose() and (self._execute_operations or self.is_dry_run()): self._io.writeln( ' - Skipping {} ({}) {}'.format( operation.package.pretty_name, operation.package.full_pretty_version, operation.skip_reason ) ) return if self._execute_operations or self.is_dry_run(): self._io.writeln( ' - Installing {} ({})'.format( operation.package.pretty_name, operation.package.full_pretty_version ) ) if not self._execute_operations: return self._installer.install(operation.package) def _execute_update(self, operation): # type: (Update) -> None source = operation.initial_package target = operation.target_package if operation.skipped: if self.is_verbose() and (self._execute_operations or self.is_dry_run()): self._io.writeln( ' - Skipping {} ({}) {}'.format( target.pretty_name, target.full_pretty_version, operation.skip_reason ) ) return if self._execute_operations or self.is_dry_run(): self._io.writeln( ' - Updating {} ({} -> {})' .format( target.pretty_name, source.pretty_version, target.pretty_version ) ) if not self._execute_operations: return self._installer.update(source, target) def _execute_uninstall(self, operation): # type: (Uninstall) -> None if operation.skipped: if self.is_verbose() and (self._execute_operations or self.is_dry_run()): self._io.writeln( ' - Not removing {} ({}) {}'.format( operation.package.pretty_name, operation.package.full_pretty_version, operation.skip_reason ) ) return if self._execute_operations or self.is_dry_run(): self._io.writeln( ' - Removing {} ({})'.format( operation.package.pretty_name, operation.package.full_pretty_version ) ) if not self._execute_operations: return self._installer.remove(operation.package) def _populate_local_repo(self, local_repo, ops, locked_repository): # Add all locked packages from the lock and go from there for package in locked_repository.packages: if not local_repo.has_package(package): local_repo.add_package(package) # Now, walk through all operations and add/remove/update accordingly for op in ops: if isinstance(op, Update): package = op.target_package else: package = op.package acted_on = False for pkg in local_repo.packages: if pkg.name == package.name: # The package we operate on is in the local repo if op.job_type == 'update': if pkg.version == package.version: break local_repo.remove_package(pkg) local_repo.add_package(op.target_package) elif op.job_type == 'uninstall': local_repo.remove_package(op.package) else: # Even though the package already exists # in the lock file we will prefer the new one # to force updates local_repo.remove_package(pkg) local_repo.add_package(package) acted_on = True if not acted_on: if not local_repo.has_package(package): local_repo.add_package(package) def _get_operations_from_lock(self, locked_repository # type: Repository ): # type: (...) 
        installed_repo = self._installed_repository
        ops = []

        extra_packages = [
            p.name for p in self._get_extra_packages(locked_repository)
        ]
        for locked in locked_repository.packages:
            is_installed = False
            for installed in installed_repo.packages:
                if locked.name == installed.name:
                    is_installed = True
                    if locked.category == 'dev' and not self.is_dev_mode():
                        ops.append(Uninstall(locked))
                    elif locked.optional and locked.name not in extra_packages:
                        # Installed but optional and not requested in extras
                        ops.append(Uninstall(locked))
                    elif locked.version != installed.version:
                        ops.append(Update(installed, locked))

            if not is_installed:
                # If it's optional and not in required extras
                # we do not install
                if locked.optional and locked.name not in extra_packages:
                    continue

                ops.append(Install(locked))

        return ops

    def _filter_operations(self, ops, repo):  # type: (List[Operation], Repository) -> None
        extra_packages = [p.name for p in self._get_extra_packages(repo)]
        for op in ops:
            if isinstance(op, Update):
                package = op.target_package
            else:
                package = op.package

            if op.job_type == 'uninstall':
                continue

            parser = VersionParser()
            python = '.'.join([str(i) for i in self._venv.version_info[:3]])
            if 'python' in package.requirements:
                python_constraint = parser.parse_constraints(
                    package.requirements['python']
                )
                if not python_constraint.matches(Constraint('=', python)):
                    # Incompatible python versions
                    op.skip('Not needed for the current python version')

                    continue

            if not package.python_constraint.matches(Constraint('=', python)):
                op.skip('Not needed for the current python version')

                continue

            if 'platform' in package.requirements:
                platform_constraint = GenericConstraint.parse(
                    package.requirements['platform']
                )
                if not platform_constraint.matches(
                    GenericConstraint('=', sys.platform)
                ):
                    # Incompatible systems
                    op.skip('Not needed for the current platform')

                    continue

            if self._update:
                extras = {}
                for extra, deps in self._package.extras.items():
                    extras[extra] = [dep.name for dep in deps]
            else:
                extras = {}
                for extra, deps in self._locker.lock_data.get('extras', {}).items():
                    extras[extra] = [dep.lower() for dep in deps]

            # If a package is optional and not requested
            # in any extra we skip it
            if package.optional:
                if package.name not in extra_packages:
                    op.skip('Not required')

            # If the package is a dev package and dev packages
            # are not requested, we skip it
            if package.category == 'dev' and not self.is_dev_mode():
                op.skip('Dev dependencies not requested')

    def _get_extra_packages(self, repo):
        """
        Returns all packages required by extras.

        Maybe we just let the solver handle it?
        """
""" if self._update: extras = { k: [d.name for d in v] for k, v in self._package.extras.items() } else: extras = self._locker.lock_data.get('extras', {}) extra_packages = [] for extra_name, packages in extras.items(): if extra_name not in self._extras: continue extra_packages += [Dependency(p, '*') for p in packages] def _extra_packages(packages): pkgs = [] for package in packages: for pkg in repo.packages: if pkg.name == package.name: pkgs.append(package) pkgs += _extra_packages(pkg.requires) break return pkgs return _extra_packages(extra_packages) def _get_installer(self): # type: () -> BaseInstaller return PipInstaller(self._venv, self._io) def _get_installed(self): # type: () -> InstalledRepository return InstalledRepository.load(self._venv) PK!rHnn%poetry/installation/noop_installer.pyfrom .base_installer import BaseInstaller class NoopInstaller(BaseInstaller): def __init__(self): self._installs = [] self._updates = [] self._removals = [] @property def installs(self): return self._installs @property def updates(self): return self._updates @property def removals(self): return self._removals def install(self, package): self._installs.append(package) def update(self, source, target): self._updates.append((source, target)) def remove(self, package): self._removals.append(package) PK!t t $poetry/installation/pip_installer.pyimport os import tempfile from subprocess import CalledProcessError from poetry.utils._compat import encode from poetry.utils.venv import Venv from .base_installer import BaseInstaller class PipInstaller(BaseInstaller): def __init__(self, venv, io): # type: (Venv, ...) -> None self._venv = venv self._io = io def install(self, package, update=False): args = ['install', '--no-deps'] if package.source_type == 'legacy' and package.source_url: args += ['--index-url', package.source_url] if update: args.append('-U') if package.hashes and not package.source_type: # Format as a requirements.txt # We need to create a requirements.txt file # for each package in order to check hashes. # This is far from optimal but we do not have any # other choice since this is the only way for pip # to verify hashes. req = self.create_temporary_requirement(package) args += ['-r', req] try: self.run(*args) finally: os.unlink(req) else: args.append(self.requirement(package)) self.run(*args) def update(self, _, target): self.install(target, update=True) def remove(self, package): try: self.run('uninstall', package.name, '-y') except CalledProcessError as e: if 'not installed' in str(e): return raise def run(self, *args, **kwargs): # type: (...) 
-> str return self._venv.run('pip', *args, **kwargs) def requirement(self, package, formatted=False): if formatted and not package.source_type: req = '{}=={}'.format(package.name, package.version) for h in package.hashes: req += ' --hash sha256:{}'.format(h) req += '\n' return req if package.source_type == 'file': return os.path.realpath(package.source_reference) if package.source_type == 'git': return 'git+{}@{}#egg={}'.format( package.source_url, package.source_reference, package.name ) return '{}=={}'.format(package.name, package.version) def create_temporary_requirement(self, package): fd, name = tempfile.mkstemp( 'reqs.txt', '{}-{}'.format(package.name, package.version) ) try: os.write(fd, encode(self.requirement(package, formatted=True))) finally: os.close(fd) return name PK!Ypoetry/io/__init__.pyfrom .null_io import NullIO PK!J poetry/io/null_io.pyfrom cleo.inputs import ListInput from cleo.outputs import NullOutput from poetry.console.styles.poetry import PoetryStyle class NullIO(PoetryStyle): def __init__(self): super(NullIO, self).__init__(ListInput([]), NullOutput()) def is_quiet(self): # type: () -> bool return False def is_verbose(self): # type: () -> bool return False def is_very_verbose(self): # type: () -> bool return False def is_debug(self): # type: () -> bool return False def writeln(self, *args, **kwargs): pass def write(self, *args, **kwargs): pass def new_line(self, *args, **kwargs): pass PK!La77poetry/io/raw_argv_input.pyimport sys from cleo.inputs import ArgvInput class RawArgvInput(ArgvInput): def parse(self): self._parsed = self._tokens while True: try: token = self._parsed.pop(0) except IndexError: break self.parse_argument(token) PK!poetry/json/__init__.pyPK!J*,&&&poetry/json/schemas/poetry-schema.json{ "$schema": "http://json-schema.org/draft-04/schema#", "name": "Package", "type": "object", "additionalProperties": false, "required": [ "name", "version", "description" ], "properties": { "name": { "type": "string", "description": "Package name." }, "version": { "type": "string", "description": "Package version." }, "description": { "type": "string", "description": "Short package description." }, "keywords": { "type": "array", "items": { "type": "string", "description": "A tag/keyword that this package relates to." } }, "homepage": { "type": "string", "description": "Homepage URL for the project.", "format": "uri" }, "repository": { "type": "string", "description": "Repository URL for the project.", "format": "uri" }, "documentation": { "type": "string", "description": "Documentation URL for the project.", "format": "uri" }, "license": { "type": "string", "description": "License name." }, "authors": { "$ref": "#/definitions/authors" }, "readme": { "type": "string", "description": "The path to the README file" }, "classifiers": { "type": "array", "description": "A list of trove classifers." }, "dependencies": { "type": "object", "description": "This is a hash of package name (keys) and version constraints (values) that are required to run this package.", "required": ["python"], "properties": { "python": { "type": "string", "description": "The Python versions the package is compatible with." 
} }, "patternProperties": { "^[a-zA-Z-_.0-9]+$": { "oneOf": [ { "$ref": "#/definitions/dependency" }, { "$ref": "#/definitions/long-dependency" }, { "$ref": "#/definitions/git-dependency" }, { "$ref": "#/definitions/file-dependency" } ] } }, "additionalProperties": false }, "dev-dependencies": { "type": "object", "description": "This is a hash of package name (keys) and version constraints (values) that this package requires for developing it (testing tools and such).", "patternProperties": { "^[a-zA-Z-_.0-9]+$": { "oneOf": [ { "$ref": "#/definitions/dependency" }, { "$ref": "#/definitions/long-dependency" }, { "$ref": "#/definitions/git-dependency" }, { "$ref": "#/definitions/file-dependency" } ] } }, "additionalProperties": false }, "extras": { "type": "object", "patternProperties": { "^[a-zA-Z-_.0-9]+$": { "type": "array", "items": { "type": "string" } } } }, "build": { "type": "string", "description": "The file used to build extensions." }, "source": { "type": "array", "description": "A set of additional repositories where packages can be found.", "additionalProperties": { "$ref": "#/definitions/repository" }, "items": { "$ref": "#/definitions/repository" } }, "scripts": { "type": "object", "description": "A hash of scripts to be installed.", "items": { "type": "string" } }, "plugins": { "type": "object", "description": "A hash of hashes representing plugins", "patternProperties": { "^[a-zA-Z-_.0-9]+$": { "type": "object", "patternProperties": { "^[a-zA-Z-_.0-9]+$": { "type": "string" } } } } } }, "definitions": { "authors": { "type": "array", "description": "List of authors that contributed to the package. This is typically the main maintainers, not the full list.", "items": { "type": "string" } }, "dependency": { "type": "string", "description": "The constraint of the dependency." }, "long-dependency": { "type": "object", "required": ["version"], "additionalProperties": false, "properties": { "version": { "type": "string", "description": "The constraint of the dependency." }, "python": { "type": "string", "description": "The python versions for which the dependency should be installed." }, "allows-prereleases": { "type": "boolean", "description": "Whether the dependency allows prereleases or not." }, "optional": { "type": "boolean", "description": "Whether the dependency is optional or not." }, "extras": { "type": "array", "description": "The required extras for this dependency.", "items": { "type": "string" } } } }, "git-dependency": { "type": "object", "required": ["git"], "additionalProperties": false, "properties": { "git": { "type": "string", "description": "The url of the git repository.", "format": "uri" }, "branch": { "type": "string", "description": "The branch to checkout." }, "tag": { "type": "string", "description": "The tag to checkout." }, "rev": { "type": "string", "description": "The revision to checkout." }, "python": { "type": "string", "description": "The python versions for which the dependency should be installed." }, "allows-prereleases": { "type": "boolean", "description": "Whether the dependency allows prereleases or not." }, "optional": { "type": "boolean", "description": "Whether the dependency is optional or not." }, "extras": { "type": "array", "description": "The required extras for this dependency.", "items": { "type": "string" } } } }, "file-dependency": { "type": "object", "required": ["file"], "additionalProperties": false, "properties": { "file": { "type": "string", "description": "The path to the file." 
}, "python": { "type": "string", "description": "The python versions for which the dependency should be installed." }, "allows-prereleases": { "type": "boolean", "description": "Whether the dependency allows prereleases or not." }, "optional": { "type": "boolean", "description": "Whether the dependency is optional or not." }, "extras": { "type": "array", "description": "The required extras for this dependency.", "items": { "type": "string" } } } }, "repository": { "type": "object", "properties": { "name": { "type": "string", "description": "The name of the repository" }, "url": { "type": "string", "description": "The url of the repository", "format": "uri" } } } } } PK!(ypoetry/layouts/__init__.pyfrom typing import Type from .layout import Layout from .standard import StandardLayout _LAYOUTS = { 'standard': StandardLayout } def layout(name): # type: (str) -> Type[Layout] if name not in _LAYOUTS: raise ValueError('Invalid layout') return _LAYOUTS[name] PK!QN  poetry/layouts/layout.pyfrom poetry.toml import dumps from poetry.toml import loads from poetry.utils.helpers import module_name from poetry.vcs.git import Git TESTS_DEFAULT = u"""from {package_name} import __version__ def test_version(): assert __version__ == '{version}' """ POETRY_DEFAULT = """\ [tool.poetry] name = "" version = "" description = "" authors = [] [tool.poetry.dependencies] python = "*" [tool.poetry.dev-dependencies] pytest = "^3.5" """ class Layout(object): def __init__(self, project, version='0.1.0', readme_format='md', author=None): self._project = project self._package_name = module_name(project) self._version = version self._readme_format = readme_format self._dependencies = {} self._dev_dependencies = {} self._include = [] self._git = Git() git_config = self._git.config if not author: if ( git_config.get('user.name') and git_config.get('user.email') ): author = u'{} <{}>'.format( git_config['user.name'], git_config['user.email'] ) else: author = 'Your Name ' self._author = author def create(self, path, with_tests=True): self._dependencies = {} self._dev_dependencies = {} self._include = [] path.mkdir(parents=True, exist_ok=True) self._create_default(path) self._create_readme(path) if with_tests: self._create_tests(path) self._write_poetry(path) def _create_default(self, path, src=True): raise NotImplementedError() def _create_readme(self, path): if self._readme_format == 'rst': readme_file = path / 'README.rst' else: readme_file = path / 'README.md' readme_file.touch() def _create_tests(self, path): self._dev_dependencies['pytest'] = '^3.0' tests = path / 'tests' tests_init = tests / '__init__.py' tests_default = tests / 'test_{}.py'.format(self._package_name) tests.mkdir() tests_init.touch(exist_ok=False) with tests_default.open('w') as f: f.write( TESTS_DEFAULT.format( package_name=self._package_name, version=self._version ) ) def _write_poetry(self, path): content = loads(POETRY_DEFAULT) poetry_content = content['tool']['poetry'] poetry_content['name'] = self._project poetry_content['version'] = self._version poetry_content['authors'].append(self._author) poetry = path / 'pyproject.toml' with poetry.open('w') as f: f.write(dumps(content)) PK!g{poetry/layouts/standard.py# -*- coding: utf-8 -*- from .layout import Layout DEFAULT = u"""__version__ = '{version}' """ class StandardLayout(Layout): def _create_default(self, path): package_path = path / self._package_name package_init = package_path / '__init__.py' package_path.mkdir() with package_init.open('w') as f: f.write(DEFAULT.format(version=self._version)) 
PK!W2 hpoetry/locations.pyfrom .utils.appdirs import user_cache_dir from .utils.appdirs import user_config_dir CACHE_DIR = user_cache_dir('pypoetry') CONFIG_DIR = user_config_dir('pypoetry') PK!ΨDpoetry/masonry/__init__.py""" This module handles the packaging and publishing of python projects. A lot of the code used here has been taken from `flit <https://github.com/takluyver/flit>`__ and adapted to work with the poetry codebase, so kudos to them for showing the way. """ from .builder import Builder PK!poetry/masonry/api.py""" PEP-517 compliant buildsystem API """ import logging from pathlib import Path from poetry.poetry import Poetry from poetry.io import NullIO from poetry.utils.venv import Venv from .builders import SdistBuilder from .builders import WheelBuilder log = logging.getLogger(__name__) # PEP 517 specifies that the CWD will always be the source tree poetry = Poetry.create('.') def get_requires_for_build_wheel(config_settings=None): """ Returns a list of requirements for building, as strings """ main, extras = SdistBuilder.convert_dependencies(poetry.package, poetry.package.requires) return main + [req for reqs in extras.values() for req in reqs] # For now, we require all dependencies to build either a wheel or an sdist. get_requires_for_build_sdist = get_requires_for_build_wheel def build_wheel(wheel_directory, config_settings=None, metadata_directory=None): """Builds a wheel, places it in wheel_directory""" return WheelBuilder.make_in(poetry, Venv(), NullIO(), Path(wheel_directory)) def build_sdist(sdist_directory, config_settings=None): """Builds an sdist, places it in sdist_directory""" path = SdistBuilder(poetry, Venv(), NullIO()).build(Path(sdist_directory)) return path.name PK!ݱTTpoetry/masonry/builder.pyfrom .builders import CompleteBuilder from .builders import SdistBuilder from .builders import WheelBuilder class Builder: _FORMATS = { 'sdist': SdistBuilder, 'wheel': WheelBuilder, 'all': CompleteBuilder } def __init__(self, poetry, venv, io): self._poetry = poetry self._venv = venv self._io = io def build(self, fmt): if fmt not in self._FORMATS: raise ValueError('Invalid format: {}'.format(fmt)) builder = self._FORMATS[fmt](self._poetry, self._venv, self._io) return builder.build() PK!i>$ff#poetry/masonry/builders/__init__.pyfrom .complete import CompleteBuilder from .sdist import SdistBuilder from .wheel import WheelBuilder PK!N"poetry/masonry/builders/builder.py# -*- coding: utf-8 -*- import os import re import shutil import tempfile from collections import defaultdict from contextlib import contextmanager from poetry.semver.constraints import Constraint from poetry.semver.constraints import MultiConstraint from poetry.semver.version_parser import VersionParser from poetry.utils._compat import Path from poetry.vcs import get_vcs from ..metadata import Metadata from ..utils.module import Module AUTHOR_REGEX = re.compile('(?u)^(?P<name>[- .,\w\d\'’"()]+) <(?P<email>.+?)>$') class Builder(object): AVAILABLE_PYTHONS = { '2', '2.7', '3', '3.4', '3.5', '3.6', '3.7' } def __init__(self, poetry, venv, io): self._poetry = poetry self._venv = venv self._io = io self._package = poetry.package self._path = poetry.file.parent self._module = Module( self._package.name, self._path.as_posix() ) self._meta = Metadata.from_package(self._package) def build(self): raise NotImplementedError() def find_excluded_files(self): # type: () -> list # Checking VCS vcs = get_vcs(self._path) if not vcs: return [] ignored = vcs.get_ignored_files() result = [] for file in ignored: try: file = Path(file).absolute().relative_to(self._path) except ValueError: # Should only happen in tests
continue result.append(file) return result def find_files_to_add(self, exclude_build=True): # type: () -> list """ Finds all files to add to the tarball TODO: Support explicit include/exclude """ excluded = self.find_excluded_files() src = self._module.path to_add = [] for root, dirs, files in os.walk(src.as_posix()): root = Path(root) if root.name == '__pycache__': continue for file in files: file = root / file file = file.relative_to(self._path) if file in excluded: continue if file.suffix == '.pyc': continue self._io.writeln( ' - Adding: {}'.format(str(file)), verbosity=self._io.VERBOSITY_VERY_VERBOSE ) to_add.append(file) # Include project files self._io.writeln( ' - Adding: pyproject.toml', verbosity=self._io.VERBOSITY_VERY_VERBOSE ) to_add.append(Path('pyproject.toml')) # If a README is specified we need to include it # to avoid errors if 'readme' in self._poetry.local_config: readme = self._path / self._poetry.local_config['readme'] if readme.exists(): self._io.writeln( ' - Adding: {}'.format( readme.relative_to(self._path) ), verbosity=self._io.VERBOSITY_VERY_VERBOSE ) to_add.append(readme.relative_to(self._path)) # If a build script is specified and explicitly required # we add it to the list of files if self._package.build and not exclude_build: to_add.append(Path(self._package.build)) return sorted(to_add) def convert_entry_points(self): # type: () -> dict result = defaultdict(list) # Scripts -> Entry points for name, ep in self._poetry.local_config.get('scripts', {}).items(): result['console_scripts'].append('{} = {}'.format(name, ep)) # Plugins -> entry points plugins = self._poetry.local_config.get('plugins', {}) for groupname, group in plugins.items(): for name, ep in sorted(group.items()): result[groupname].append('{} = {}'.format(name, ep)) for groupname in result: result[groupname] = sorted(result[groupname]) return dict(result) @classmethod def convert_author(cls, author): # type: () -> dict m = AUTHOR_REGEX.match(author) name = m.group('name') email = m.group('email') return { 'name': name, 'email': email } def get_classifers(self): classifiers = [] # Automatically set python classifiers parser = VersionParser() if self._package.python_versions == '*': python_constraint = parser.parse_constraints('~2.7 || ^3.4') else: python_constraint = self._package.python_constraint for version in sorted(self.AVAILABLE_PYTHONS): if python_constraint.matches(Constraint('=', version)): classifiers.append( 'Programming Language :: Python :: {}'.format(version) ) return classifiers def convert_python_version(self): constraint = self._package.python_constraint if isinstance(constraint, MultiConstraint): python_requires = ','.join( [str(c).replace(' ', '') for c in constraint.constraints] ) else: python_requires = str(constraint).replace(' ', '') return python_requires @classmethod @contextmanager def temporary_directory(cls, *args, **kwargs): try: from tempfile import TemporaryDirectory with TemporaryDirectory(*args, **kwargs) as name: yield name except ImportError: name = tempfile.mkdtemp(*args, **kwargs) yield name shutil.rmtree(name) PK!++#poetry/masonry/builders/complete.pyimport os import tarfile import poetry.poetry from contextlib import contextmanager from .builder import Builder from .sdist import SdistBuilder from .wheel import WheelBuilder class CompleteBuilder(Builder): def build(self): # We start by building the tarball # We will use it to build the wheel sdist_builder = SdistBuilder(self._poetry, self._venv, self._io) sdist_file = sdist_builder.build() self._io.writeln('')
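# Building the wheel from the unpacked sdist (below) rather than from the
# working tree means both artifacts are guaranteed to contain the same
# sources. Hedged usage sketch of the front-end defined in builder.py above:
#     Builder(poetry, venv, io).build('all')  # dispatches to this class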
dist_dir = self._path / 'dist' with self.unpacked_tarball(sdist_file) as tmpdir: WheelBuilder.make_in( poetry.poetry.Poetry.create(tmpdir), self._venv, self._io, dist_dir, original=self._poetry ) @classmethod @contextmanager def unpacked_tarball(cls, path): tf = tarfile.open(str(path)) with cls.temporary_directory() as tmpdir: tf.extractall(tmpdir) files = os.listdir(tmpdir) assert len(files) == 1, files yield os.path.join(tmpdir, files[0]) PK!]ދ)) poetry/masonry/builders/sdist.py# -*- coding: utf-8 -*- import os import re import tarfile from collections import defaultdict from copy import copy from gzip import GzipFile from io import BytesIO from posixpath import join as pjoin from pprint import pformat from typing import List from poetry.packages import Dependency from poetry.utils._compat import Path from poetry.utils._compat import encode from poetry.utils._compat import to_str from ..utils.helpers import normalize_file_permissions from .builder import Builder SETUP = """\ # -*- coding: utf-8 -*- from distutils.core import setup {before} setup_kwargs = {{ 'name': {name!r}, 'version': {version!r}, 'description': {description!r}, 'long_description': {long_description!r}, 'author': {author!r}, 'author_email': {author_email!r}, 'url': {url!r}, {extra} }} {after} setup(**setup_kwargs) """ PKG_INFO = """\ Metadata-Version: 2.1 Name: {name} Version: {version} Summary: {summary} Home-page: {home_page} Author: {author} Author-email: {author_email} """ class SdistBuilder(Builder): def build(self, target_dir=None): # type: (Path) -> Path self._io.writeln(' - Building sdist') if target_dir is None: target_dir = self._path / 'dist' if not target_dir.exists(): target_dir.mkdir(parents=True) target = target_dir / '{}-{}.tar.gz'.format( self._package.pretty_name, self._package.version ) gz = GzipFile(target.as_posix(), mode='wb') tar = tarfile.TarFile(target.as_posix(), mode='w', fileobj=gz, format=tarfile.PAX_FORMAT) try: tar_dir = '{}-{}'.format( self._package.pretty_name, self._package.version ) files_to_add = self.find_files_to_add(exclude_build=False) for relpath in files_to_add: path = self._path / relpath tar_info = tar.gettarinfo( str(path), arcname=pjoin(tar_dir, str(relpath)) ) tar_info = self.clean_tarinfo(tar_info) if tar_info.isreg(): with path.open('rb') as f: tar.addfile(tar_info, f) else: tar.addfile(tar_info) # Symlinks & ? 
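# A setup.py (rendered from the SETUP template above) and a PKG-INFO file
# are generated next, so the sdist remains installable by plain
# pip/setuptools even when poetry itself is not available.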
setup = self.build_setup() tar_info = tarfile.TarInfo(pjoin(tar_dir, 'setup.py')) tar_info.size = len(setup) tar.addfile(tar_info, BytesIO(setup)) pkg_info = self.build_pkg_info() tar_info = tarfile.TarInfo(pjoin(tar_dir, 'PKG-INFO')) tar_info.size = len(pkg_info) tar.addfile(tar_info, BytesIO(pkg_info)) finally: tar.close() gz.close() self._io.writeln(' - Built {}'.format(target.name)) return target def build_setup(self): # type: () -> bytes before, extra, after = [], [], [] # If we have a build script, use it if self._package.build: after += [ 'from {} import *'.format(self._package.build.split('.')[0]), 'build(setup_kwargs)' ] if self._module.is_package(): packages, package_data = self.find_packages( self._module.path.as_posix() ) before.append("packages = \\\n{}\n".format(pformat(sorted(packages)))) before.append("package_data = \\\n{}\n".format(pformat(package_data))) extra.append("'packages': packages,") extra.append("'package_data': package_data,") else: extra.append("'py_modules': {!r},".format(self._module.name)) dependencies, extras = self.convert_dependencies( self._package, self._package.requires ) if dependencies: before.append("install_requires = \\\n{}\n".format(pformat(dependencies))) extra.append("'install_requires': install_requires,") if extras: before.append("extras_require = \\\n{}\n".format(pformat(extras))) extra.append("'extras_require': extras_require,") entry_points = self.convert_entry_points() if entry_points: before.append("entry_points = \\\n{}\n".format(pformat(entry_points))) extra.append("'entry_points': entry_points,") if self._package.python_versions != '*': python_requires = self._meta.requires_python extra.append("'python_requires': {!r},".format(python_requires)) return encode(SETUP.format( before='\n'.join(before), name=to_str(self._meta.name), version=to_str(self._meta.version), description=to_str(self._meta.summary), long_description=to_str(self._meta.description), author=to_str(self._meta.author), author_email=to_str(self._meta.author_email), url=to_str(self._meta.home_page), extra='\n '.join(extra), after='\n'.join(after) )) def build_pkg_info(self): pkg_info = PKG_INFO.format( name=self._meta.name, version=self._meta.version, summary=self._meta.summary, home_page=self._meta.home_page, author=to_str(self._meta.author), author_email=to_str(self._meta.author_email), ) if self._meta.keywords: pkg_info += "Keywords: {}\n".format(self._meta.keywords) if self._meta.requires_python: pkg_info += 'Requires-Python: {}\n'.format( self._meta.requires_python ) for classifier in self._meta.classifiers: pkg_info += 'Classifier: {}\n'.format(classifier) for extra in sorted(self._meta.provides_extra): pkg_info += 'Provides-Extra: {}\n'.format(extra) for dep in sorted(self._meta.requires_dist): pkg_info += 'Requires-Dist: {}\n'.format(dep) return encode(pkg_info) @classmethod def find_packages(cls, path): """ Discover subpackages and data. 
It also retrieve necessary files """ pkgdir = os.path.normpath(path) pkg_name = os.path.basename(pkgdir) pkg_data = defaultdict(list) # Undocumented distutils feature: # the empty string matches all package names pkg_data[''].append('*') packages = [pkg_name] subpkg_paths = set() def find_nearest_pkg(rel_path): parts = rel_path.split(os.sep) for i in reversed(range(1, len(parts))): ancestor = '/'.join(parts[:i]) if ancestor in subpkg_paths: pkg = '.'.join([pkg_name] + parts[:i]) return pkg, '/'.join(parts[i:]) # Relative to the top-level package return pkg_name, rel_path for path, dirnames, filenames in os.walk(pkgdir, topdown=True): if os.path.basename(path) == '__pycache__': continue from_top_level = os.path.relpath(path, pkgdir) if from_top_level == '.': continue is_subpkg = '__init__.py' in filenames if is_subpkg: subpkg_paths.add(from_top_level) parts = from_top_level.split(os.sep) packages.append('.'.join([pkg_name] + parts)) else: pkg, from_nearest_pkg = find_nearest_pkg(from_top_level) pkg_data[pkg].append(pjoin(from_nearest_pkg, '*')) # Sort values in pkg_data pkg_data = {k: sorted(v) for (k, v) in pkg_data.items()} return sorted(packages), pkg_data @classmethod def convert_dependencies(cls, package, # type: Package dependencies # type: List[Dependency] ): main = [] extras = defaultdict(list) req_regex = re.compile('^(.+) \((.+)\)$') for dependency in dependencies: if dependency.is_optional(): for extra_name, reqs in package.extras.items(): for req in reqs: if req.name == dependency.name: requirement = to_str( dependency.to_pep_508(with_extras=False) ) if ';' in requirement: requirement, conditions = requirement.split(';') requirement = requirement.strip() if req_regex.match(requirement): requirement = req_regex.sub('\\1\\2', requirement.strip()) extras[extra_name + ':' + conditions.strip()].append(requirement) continue requirement = requirement.strip() if req_regex.match(requirement): requirement = req_regex.sub('\\1\\2', requirement.strip()) extras[extra_name].append(requirement) continue requirement = to_str(dependency.to_pep_508()) if ';' in requirement: requirement, conditions = requirement.split(';') requirement = requirement.strip() if req_regex.match(requirement): requirement = req_regex.sub('\\1\\2', requirement.strip()) extras[':' + conditions.strip()].append(requirement) continue requirement = requirement.strip() if req_regex.match(requirement): requirement = req_regex.sub('\\1\\2', requirement.strip()) main.append(requirement) return main, dict(extras) @classmethod def clean_tarinfo(cls, tar_info): """ Clean metadata from a TarInfo object to make it more reproducible. 
- Set uid & gid to 0 - Set uname and gname to "" - Normalise permissions to 644 or 755 - Set mtime if not None """ ti = copy(tar_info) ti.uid = 0 ti.gid = 0 ti.uname = '' ti.gname = '' ti.mode = normalize_file_permissions(ti.mode) return ti PK!u#,#, poetry/masonry/builders/wheel.pyfrom __future__ import unicode_literals import contextlib import hashlib import os import re import tempfile import shutil import stat try: import zipfile36 as zipfile except ImportError: import zipfile from base64 import urlsafe_b64encode from io import StringIO from poetry.__version__ import __version__ from poetry.semver.constraints import Constraint from poetry.semver.constraints import MultiConstraint from poetry.utils._compat import Path from ..utils.helpers import normalize_file_permissions from ..utils.tags import get_abbr_impl from ..utils.tags import get_abi_tag from ..utils.tags import get_impl_ver from ..utils.tags import get_platform from .builder import Builder wheel_file_template = """\ Wheel-Version: 1.0 Generator: poetry {version} Root-Is-Purelib: {pure_lib} Tag: {tag} """ class WheelBuilder(Builder): def __init__(self, poetry, venv, io, target_fp, original=None): super(WheelBuilder, self).__init__(poetry, venv, io) self._records = [] self._original_path = self._path if original: self._original_path = original.file.parent # Open the zip file ready to write self._wheel_zip = zipfile.ZipFile(target_fp, 'w', compression=zipfile.ZIP_DEFLATED) @classmethod def make_in(cls, poetry, venv, io, directory, original=None): # We don't know the final filename until metadata is loaded, so write to # a temporary_file, and rename it afterwards. (fd, temp_path) = tempfile.mkstemp(suffix='.whl', dir=str(directory)) os.close(fd) try: with open(temp_path, 'w+b') as fp: wb = WheelBuilder(poetry, venv, io, fp, original=original) wb.build() wheel_path = directory / wb.wheel_filename if wheel_path.exists(): os.unlink(str(wheel_path)) os.rename(temp_path, str(wheel_path)) except: os.unlink(temp_path) raise @classmethod def make(cls, poetry, venv, io): """Build a wheel in the dist/ directory, and optionally upload it. """ dist_dir = poetry.file.parent / 'dist' try: dist_dir.mkdir() except FileExistsError: pass cls.make_in(poetry, venv, io, dist_dir) def build(self): self._io.writeln(' - Building wheel') try: self._build() self.copy_module() self.write_metadata() self.write_record() finally: self._wheel_zip.close() self._io.writeln(' - Built {}'.format(self.wheel_filename)) def _build(self): if self._package.build: setup = self._path / 'setup.py' # We need to place ourselves in the temporary # directory in order to build the package current_path = os.getcwd() try: os.chdir(str(self._path)) self._venv.run( 'python', str(setup), 'build', '-b', str(self._path / 'build') ) finally: os.chdir(current_path) build_dir = self._path / 'build' lib = list(build_dir.glob('lib.*')) if not lib: # The result of building the extensions # does not exist, this may due to conditional # builds, so we assume that it's okay return lib = lib[0] for pkg in lib.glob('*'): shutil.rmtree(str(self._path / pkg.name)) shutil.copytree(str(pkg), str(self._path / pkg.name)) def copy_module(self): if self._module.is_package(): files = self.find_files_to_add() # Walk the files and compress them, # sorting everything so the order is stable. 
for file in sorted(files): full_path = self._path / file # Do not include topmost files if full_path.relative_to(self._path) == Path(file.name): continue self._add_file(full_path, file) else: self._add_file(str(self._module.path), self._module.path.name) def write_metadata(self): if ( 'scripts' in self._poetry.local_config or 'plugins' in self._poetry.local_config ): with self._write_to_zip(self.dist_info + '/entry_points.txt') as f: self._write_entry_points(f) for base in ('COPYING', 'LICENSE'): for path in sorted(self._path.glob(base + '*')): self._add_file(path, '%s/%s' % (self.dist_info, path.name)) with self._write_to_zip(self.dist_info + '/WHEEL') as f: self._write_wheel_file(f) with self._write_to_zip(self.dist_info + '/METADATA') as f: self._write_metadata_file(f) def write_record(self): # Write a record of the files in the wheel with self._write_to_zip(self.dist_info + '/RECORD') as f: for path, hash, size in self._records: f.write('{},sha256={},{}\n'.format(path, hash, size)) # RECORD itself is recorded with no hash or size f.write(self.dist_info + '/RECORD,,\n') def find_excluded_files(self): # type: () -> list # Checking VCS return [] @property def dist_info(self): # type: () -> str return self.dist_info_name(self._package.name, self._package.version) @property def wheel_filename(self): # type: () -> str return '{}-{}-{}.whl'.format( re.sub("[^\w\d.]+", "_", self._package.pretty_name, flags=re.UNICODE), re.sub("[^\w\d.]+", "_", self._package.version, flags=re.UNICODE), self.tag ) def supports_python2(self): return self._package.python_constraint.matches( MultiConstraint([ Constraint('>=', '2.0.0'), Constraint('<', '3.0.0') ]) ) def dist_info_name(self, distribution, version): # type: (...) -> str escaped_name = re.sub("[^\w\d.]+", "_", distribution, flags=re.UNICODE) escaped_version = re.sub("[^\w\d.]+", "_", version, flags=re.UNICODE) return '{}-{}.dist-info'.format(escaped_name, escaped_version) @property def tag(self): if self._package.build: platform = get_platform().replace('.', '_').replace('-', '_') impl_name = get_abbr_impl(self._venv) impl_ver = get_impl_ver(self._venv) impl = impl_name + impl_ver abi_tag = str(get_abi_tag(self._venv)).lower() tag = (impl, abi_tag, platform) else: platform = 'any' if self.supports_python2(): impl = 'py2.py3' else: impl = 'py3' tag = (impl, 'none', platform) return '-'.join(tag) def _add_file(self, full_path, rel_path): full_path, rel_path = str(full_path), str(rel_path) if os.sep != '/': # We always want to have /-separated paths in the zip file and in # RECORD rel_path = rel_path.replace(os.sep, '/') zinfo = zipfile.ZipInfo(rel_path) # Normalize permission bits to either 755 (executable) or 644 st_mode = os.stat(full_path).st_mode new_mode = normalize_file_permissions(st_mode) zinfo.external_attr = (new_mode & 0xFFFF) << 16 # Unix attributes if stat.S_ISDIR(st_mode): zinfo.external_attr |= 0x10 # MS-DOS directory flag hashsum = hashlib.sha256() with open(full_path, 'rb') as src: while True: buf = src.read(1024 * 8) if not buf: break hashsum.update(buf) src.seek(0) self._wheel_zip.writestr(zinfo, src.read()) size = os.stat(full_path).st_size hash_digest = urlsafe_b64encode( hashsum.digest() ).decode('ascii').rstrip('=') self._records.append((rel_path, hash_digest, size)) @contextlib.contextmanager def _write_to_zip(self, rel_path): sio = StringIO() yield sio # The default is a fixed timestamp rather than the current time, so # that building a wheel twice on the same computer can automatically # give you the exact same result. 
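# (ZIP/DOS timestamps cannot predate 1980, so an arbitrary but fixed,
# valid date is used: 2016-01-01.)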
date_time = (2016, 1, 1, 0, 0, 0) zi = zipfile.ZipInfo(rel_path, date_time) b = sio.getvalue().encode('utf-8') hashsum = hashlib.sha256(b) hash_digest = urlsafe_b64encode( hashsum.digest() ).decode('ascii').rstrip('=') self._wheel_zip.writestr(zi, b, compress_type=zipfile.ZIP_DEFLATED) self._records.append((rel_path, hash_digest, len(b))) def _write_entry_points(self, fp): """ Write entry_points.txt. """ entry_points = self.convert_entry_points() for group_name in sorted(entry_points): fp.write('[{}]\n'.format(group_name)) for ep in sorted(entry_points[group_name]): fp.write(ep.replace(' ', '') + '\n') fp.write('\n') def _write_wheel_file(self, fp): fp.write( wheel_file_template.format( version=__version__, pure_lib='true' if self._package.build is None else 'false', tag=self.tag ) ) def _write_metadata_file(self, fp): """ Write out metadata in the 2.x format (email like) """ fp.write('Metadata-Version: 2.1\n') fp.write('Name: {}\n'.format(self._meta.name)) fp.write('Version: {}\n'.format(self._meta.version)) fp.write('Summary: {}\n'.format(self._meta.summary)) fp.write('Home-page: {}\n'.format(self._meta.home_page or 'UNKNOWN')) fp.write('License: {}\n'.format(self._meta.license or 'UNKNOWN')) # Optional fields if self._meta.keywords: fp.write("Keywords: {}\n".format(self._meta.keywords)) if self._meta.author: fp.write('Author: {}\n'.format(self._meta.author)) if self._meta.author_email: fp.write('Author-email: {}\n'.format(self._meta.author_email)) if self._meta.requires_python: fp.write('Requires-Python: {}\n'.format(self._meta.requires_python)) for classifier in self._meta.classifiers: fp.write('Classifier: {}\n'.format(classifier)) for extra in sorted(self._meta.provides_extra): fp.write('Provides-Extra: {}\n'.format(extra)) for dep in sorted(self._meta.requires_dist): fp.write('Requires-Dist: {}\n'.format(dep)) if self._meta.description_content_type: fp.write('Description-Content-Type: ' '{}\n'.format(self._meta.description_content_type)) if self._meta.description is not None: fp.write('\n' + self._meta.description + '\n') PK!( poetry/masonry/metadata.pyfrom poetry.utils.helpers import canonicalize_name from poetry.version.helpers import format_python_constraint class Metadata: metadata_version = '2.1' # version 1.0 name = None version = None platforms = () supported_platforms = () summary = None description = None keywords = None home_page = None download_url = None author = None author_email = None license = None # version 1.1 classifiers = () requires = () provides = () obsoletes = () # version 1.2 maintainer = None maintainer_email = None requires_python = None requires_external = () requires_dist = [] provides_dist = () obsoletes_dist = () project_urls = () # Version 2.1 description_content_type = None provides_extra = [] @classmethod def from_package(cls, package): # type: (...)
-> Metadata meta = cls() meta.name = canonicalize_name(package.name) meta.version = package.version meta.summary = package.description if package.readme: with package.readme.open() as f: meta.description = f.read() meta.keywords = ','.join(package.keywords) meta.home_page = package.homepage or package.repository_url meta.author = package.author_name meta.author_email = package.author_email if package.license: meta.license = package.license.id meta.classifiers = package.all_classifiers # Version 1.2 meta.maintainer = meta.author meta.maintainer_email = meta.author_email meta.requires_dist = [d.to_pep_508() for d in package.requires] # Requires python meta.requires_python = format_python_constraint(package.python_constraint) # Version 2.1 if package.readme: if package.readme.suffix == '.rst': meta.description_content_type = 'text/x-rst' elif package.readme.suffix in ['.md', '.markdown']: meta.description_content_type = 'text/markdown' else: meta.description_content_type = 'text/plain' meta.provides_extra = [e for e in package.extras] return meta PK!5k!!%poetry/masonry/publishing/__init__.pyfrom .publisher import Publisher PK!f &poetry/masonry/publishing/publisher.pyimport toml from poetry.locations import CONFIG_DIR from poetry.utils._compat import Path from .uploader import Uploader class Publisher: """ Registers and publishes packages to remote repositories. """ def __init__(self, poetry, io): self._poetry = poetry self._package = poetry.package self._io = io self._uploader = Uploader(poetry, io) def publish(self, repository_name): if repository_name: self._io.writeln( 'Publishing {} ({}) ' 'to {}'.format( self._package.pretty_name, self._package.pretty_version, repository_name ) ) else: self._io.writeln( 'Publishing {} ({}) ' 'to PyPI'.format( self._package.pretty_name, self._package.pretty_version ) ) if not repository_name: url = 'https://upload.pypi.org/legacy/' repository_name = 'pypi' else: # Retrieving config information config_file = Path(CONFIG_DIR) / 'config.toml' if not config_file.exists(): raise RuntimeError( 'Config file does not exist. ' 'Unable to get repository information' ) with config_file.open() as f: config = toml.loads(f.read()) if ( 'repositories' not in config or repository_name not in config['repositories'] ): raise RuntimeError( 'Repository {} is not defined'.format(repository_name) ) url = config['repositories'][repository_name]['url'] username = None password = None auth_file = Path(CONFIG_DIR) / 'auth.toml' if auth_file.exists(): with auth_file.open() as f: auth_config = toml.loads(f.read()) if 'http-basic' in auth_config and repository_name in auth_config['http-basic']: config = auth_config['http-basic'][repository_name] username = config.get('username') password = config.get('password') # Requesting missing credentials if not username: username = self._io.ask('Username:') if not password: password = self._io.ask_hidden('Password:') # TODO: handle certificates self._uploader.auth(username, password) return self._uploader.upload(url) PK!cA""%poetry/masonry/publishing/uploader.pyimport hashlib import io import re import requests from requests import adapters from requests.exceptions import HTTPError from requests.packages.urllib3 import util from requests_toolbelt import user_agent from requests_toolbelt.multipart import ( MultipartEncoder, MultipartEncoderMonitor ) from poetry.__version__ import __version__ from ..metadata import Metadata wheel_file_re = re.compile( r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?) ((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?) \.whl|\.dist-info)$""", re.VERBOSE ) _has_blake2 = hasattr(hashlib, 'blake2b') class Uploader: def __init__(self, poetry, io): self._poetry = poetry self._package = poetry.package self._io = io self._username = None self._password = None @property def user_agent(self): return user_agent('poetry', __version__) @property def adapter(self): retry = util.Retry( connect=5, total=10, method_whitelist=['GET'], status_forcelist=[500, 501, 502, 503], ) return adapters.HTTPAdapter(max_retries=retry) def auth(self, username, password): self._username = username self._password = password def make_session(self): session = requests.session() if self.is_authenticated(): session.auth = (self._username, self._password) session.headers['User-Agent'] = self.user_agent for scheme in ('http://', 'https://'): session.mount(scheme, self.adapter) return session def is_authenticated(self): return self._username is not None and self._password is not None def upload(self, url): session = self.make_session() try: self._upload(session, url) finally: session.close() def post_data(self, file): meta = Metadata.from_package(self._package) file_type = self._get_type(file) if _has_blake2: blake2_256_hash = hashlib.blake2b(digest_size=256 // 8) md5_hash = hashlib.md5() sha256_hash = hashlib.sha256() with file.open('rb') as fp: for content in iter(lambda: fp.read(io.DEFAULT_BUFFER_SIZE), b''): md5_hash.update(content) sha256_hash.update(content) if _has_blake2: blake2_256_hash.update(content) md5_digest = md5_hash.hexdigest() sha2_digest = sha256_hash.hexdigest() if _has_blake2: blake2_256_digest = blake2_256_hash.hexdigest() else: blake2_256_digest = None if file_type == 'bdist_wheel': wheel_info = wheel_file_re.match(file.name) py_version = wheel_info.group("pyver") else: py_version = None data = { # identify release "name": meta.name, "version": meta.version, # file content "filetype": file_type, "pyversion": py_version, # additional meta-data "metadata_version": meta.metadata_version, "summary": meta.summary, "home_page": meta.home_page, "author": meta.author, "author_email": meta.author_email, "maintainer": meta.maintainer, "maintainer_email": meta.maintainer_email, "license": meta.license, "description": meta.description, "keywords": meta.keywords, "platform": meta.platforms, "classifiers": meta.classifiers, "download_url": meta.download_url, "supported_platform": meta.supported_platforms, "comment": None, "md5_digest": md5_digest, "sha256_digest": sha2_digest, "blake2_256_digest": blake2_256_digest, # PEP 314 "provides": meta.provides, "requires": meta.requires, "obsoletes": meta.obsoletes, # Metadata 1.2 "project_urls": meta.project_urls, "provides_dist": meta.provides_dist, "obsoletes_dist": meta.obsoletes_dist, "requires_dist": meta.requires_dist, "requires_external": meta.requires_external, "requires_python": meta.requires_python, } # Metadata 2.1 if meta.description_content_type: data['description_content_type'] = meta.description_content_type # TODO: Provides extra return data def _upload(self, session, url): try: self._do_upload(session, url) except HTTPError as e: if ( e.response.status_code not in (403, 400) or e.response.status_code == 400 and 'was ever registered' not in e.response.text ): raise # It may be the first time we publish the package # We'll try to register it and go from there try: self._register(session, url) except HTTPError: raise def _do_upload(self, session, url): dist = self._poetry.file.parent / 'dist' packages = dist.glob(
'{}-{}*'.format(self._package.name, self._package.version) ) files = ( i for i in packages if ( i.match( '{}-{}-*.whl'.format( self._package.name, self._package.version ) ) or i.match( '{}-{}.tar.gz'.format( self._package.name, self._package.version ) ) ) ) for file in files: # TODO: Check existence resp = self._upload_file(session, url, file) resp.raise_for_status() def _upload_file(self, session, url, file): data = self.post_data(file) data.update({ # action ":action": "file_upload", "protocol_version": "1", }) data_to_send = self._prepare_data(data) with file.open('rb') as fp: data_to_send.append(( "content", (file.name, fp, "application/octet-stream"), )) encoder = MultipartEncoder(data_to_send) bar = self._io.create_progress_bar(encoder.len) bar.set_format( " - Uploading {0} %percent%%".format( file.name ) ) monitor = MultipartEncoderMonitor( encoder, lambda monitor: bar.set_progress(monitor.bytes_read) ) bar.start() resp = session.post( url, data=monitor, allow_redirects=False, headers={'Content-Type': monitor.content_type} ) if resp.ok: bar.finish() self._io.writeln('') else: self._io.overwrite('') return resp def _register(self, session, url): """ Register a package to a repository. """ dist = self._poetry.file.parent / 'dist' file = dist / '{}-{}.tar.gz'.format( self._package.name, self._package.version ) if not file.exists(): raise RuntimeError( '"{0}" does not exist.'.format(file.name) ) data = self.post_data(file) data.update({ ":action": "submit", "protocol_version": "1", }) data_to_send = self._prepare_data(data) encoder = MultipartEncoder(data_to_send) resp = session.post( url, data=encoder, allow_redirects=False, headers={'Content-Type': encoder.content_type}, ) return resp def _prepare_data(self, data): data_to_send = [] for key, value in data.items(): if not isinstance(value, (list, tuple)): data_to_send.append((key, value)) else: for item in value: data_to_send.append((key, item)) return data_to_send def _get_type(self, file): exts = file.suffixes if exts[-1] == '.whl': return 'bdist_wheel' elif len(exts) >= 2 and ''.join(exts[-2:]) == '.tar.gz': return 'sdist' raise ValueError( 'Unknown distribution format {}'.format(''.join(exts)) ) PK! poetry/masonry/utils/__init__.pyPK!ƨC88poetry/masonry/utils/helpers.pydef normalize_file_permissions(st_mode): """ Normalizes the permission bits in the st_mode field from stat to 644/755 Popular VCSs only track whether a file is executable or not. The exact permissions can vary on systems with different umasks. Normalising to 644 (non executable) or 755 (executable) makes builds more reproducible. 
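Example: 0o664 becomes 0o644 and 0o775 becomes 0o755 (the executable
bit is preserved).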
""" # Set 644 permissions, leaving higher bits of st_mode unchanged new_mode = (st_mode | 0o644) & ~0o133 if st_mode & 0o100: new_mode |= 0o111 # Executable: 644 -> 755 return new_mode PK!M}poetry/masonry/utils/module.pyfrom poetry.utils._compat import Path from poetry.utils.helpers import module_name class Module: def __init__(self, name, directory='.'): self._name = module_name(name) # It must exist either as a .py file or a directory, but not both pkg_dir = Path(directory, self._name) py_file = Path(directory, self._name + '.py') if pkg_dir.is_dir() and py_file.is_file(): raise ValueError("Both {} and {} exist".format(pkg_dir, py_file)) elif pkg_dir.is_dir(): self._path = pkg_dir self._is_package = True elif py_file.is_file(): self._path = py_file self._is_package = False else: raise ValueError("No file/folder found for package {}".format(name)) @property def name(self): # type: () -> str return self._name @property def path(self): # type: () -> Path return self._path @property def file(self): # type: () -> Path if self._is_package: return self._path / '__init__.py' else: return self._path def is_package(self): # type: () -> bool return self._is_package PK!УRyypoetry/masonry/utils/tags.py""" Generate and work with PEP 425 Compatibility Tags. Base implementation taken from https://github.com/pypa/wheel/blob/master/wheel/pep425tags.py and adapted to work with poetry's venv util. """ from __future__ import unicode_literals import distutils.util import sys import warnings def get_abbr_impl(venv): """Return abbreviated implementation name.""" impl = venv.python_implementation if impl == 'PyPy': return 'pp' elif impl == 'Jython': return 'jy' elif impl == 'IronPython': return 'ip' elif impl == 'CPython': return 'cp' raise LookupError('Unknown Python implementation: ' + impl) def get_impl_ver(venv): """Return implementation version.""" impl_ver = venv.config_var("py_version_nodot") if not impl_ver or get_abbr_impl(venv) == 'pp': impl_ver = ''.join(map(str, get_impl_version_info(venv))) return impl_ver def get_impl_version_info(venv): """Return sys.version_info-like tuple for use in decrementing the minor version.""" if get_abbr_impl(venv) == 'pp': # as per https://github.com/pypa/pip/issues/2882 return venv.version_info[:3] else: return venv.version_info[:2] def get_flag(venv, var, fallback, expected=True, warn=True): """Use a fallback method for determining SOABI flags if the needed config var is unset or unavailable.""" val = venv.config_var(var) if val is None: if warn: warnings.warn("Config variable '{0}' is unset, Python ABI tag may " "be incorrect".format(var), RuntimeWarning, 2) return fallback() return val == expected def get_abi_tag(venv): """Return the ABI tag based on SOABI (if available) or emulate SOABI (CPython 2, PyPy).""" soabi = venv.config_var('SOABI') impl = get_abbr_impl(venv) if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'): d = '' m = '' u = '' if get_flag(venv, 'Py_DEBUG', lambda: hasattr(sys, 'gettotalrefcount'), warn=(impl == 'cp')): d = 'd' if get_flag(venv, 'WITH_PYMALLOC', lambda: impl == 'cp', warn=(impl == 'cp')): m = 'm' if get_flag(venv, 'Py_UNICODE_SIZE', lambda: sys.maxunicode == 0x10ffff, expected=4, warn=(impl == 'cp' and venv.version_info < (3, 3))) \ and venv.version_info < (3, 3): u = 'u' abi = '%s%s%s%s%s' % (impl, get_impl_ver(venv), d, m, u) elif soabi and soabi.startswith('cpython-'): abi = 'cp' + soabi.split('-')[1] elif soabi: abi = soabi.replace('.', '_').replace('-', '_') else: abi = None return abi def get_platform(): """Return 
our platform name 'win32', 'linux_x86_64'""" # XXX remove distutils dependency result = distutils.util.get_platform().replace('.', '_').replace('-', '_') if result == "linux_x86_64" and sys.maxsize == 2147483647: # pip pull request #3497 result = "linux_i686" return result def get_supported(venv, versions=None, supplied_platform=None): """Return a list of supported tags for each version specified in `versions`. :param versions: a list of string versions, of the form ["33", "32"], or None. The first version will be assumed to support our ABI. """ supported = [] # Versions must be given with respect to the preference if versions is None: versions = [] version_info = get_impl_version_info(venv) major = version_info[:-1] # Support all previous minor Python versions. for minor in range(version_info[-1], -1, -1): versions.append(''.join(map(str, major + (minor,)))) impl = get_abbr_impl(venv) abis = [] abi = get_abi_tag(venv) if abi: abis[0:0] = [abi] abi3s = set() import imp for suffix in imp.get_suffixes(): if suffix[0].startswith('.abi'): abi3s.add(suffix[0].split('.', 2)[1]) abis.extend(sorted(list(abi3s))) abis.append('none') platforms = [] if supplied_platform: platforms.append(supplied_platform) platforms.append(get_platform()) # Current version, current API (built specifically for our Python): for abi in abis: for arch in platforms: supported.append(('%s%s' % (impl, versions[0]), abi, arch)) # abi3 modules compatible with older version of Python for version in versions[1:]: # abi3 was introduced in Python 3.2 if version in ('31', '30'): break for abi in abi3s: # empty set if not Python 3 for arch in platforms: supported.append(("%s%s" % (impl, version), abi, arch)) # No abi / arch, but requires our implementation: for i, version in enumerate(versions): supported.append(('%s%s' % (impl, version), 'none', 'any')) if i == 0: # Tagged specifically as being cross-version compatible # (with just the major version specified) supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any')) # Major Python version + platform; e.g. binaries not using the Python API supported.append(('py%s' % (versions[0][0]), 'none', arch)) # No abi / arch, generic Python for i, version in enumerate(versions): supported.append(('py%s' % (version,), 'none', 'any')) if i == 0: supported.append(('py%s' % (version[0]), 'none', 'any')) return supported PK!;8xMMpoetry/mixology/__init__.pyfrom .dependency_graph import DependencyGraph from .resolver import Resolver PK!,M+JJpoetry/mixology/conflict.pyclass Conflict: def __init__(self, requirement, requirements, existing, possibility_set, locked_requirement, requirement_trees, activated_by_name, underlying_error): self.requirement = requirement self.requirements = requirements self.existing = existing self.possibility_set = possibility_set self.locked_requirement = locked_requirement self.requirement_trees = requirement_trees self.activated_by_name = activated_by_name self.underlying_error = underlying_error @property def possibility(self): if self.possibility_set and self.possibility_set.latest_version: return self.possibility_set.latest_version PK!ּMM%poetry/mixology/contracts/__init__.pyfrom .specification_provider import SpecificationProvider from .ui import UI PK!
3poetry/mixology/contracts/specification_provider.pyfrom typing import Any from typing import Dict from typing import List from ..conflict import Conflict from ..dependency_graph import DependencyGraph class SpecificationProvider(object): """ Provides information about specifications and dependencies to the resolver, allowing the Resolver class to remain generic while still providing power and flexibility. This contract contains the methods that users of Molinillo must implement using knowledge of their own model classes. """ @property def name_for_explicit_dependency_source(self): # type: () -> str return 'user-specified dependency' @property def name_for_locking_dependency_source(self): # type: () -> str return 'Lockfile' def search_for(self, dependency): # type: (Any) -> List[Any] """ Search for the specifications that match the given dependency. The specifications in the returned list will be considered in reverse order, so the latest version ought to be last. """ return [] def dependencies_for(self, specification): # type: (Any) -> List[Any] """ Returns the dependencies of specification. """ return [] def is_requirement_satisfied_by(self, requirement, # type: Any activated, # type: DependencyGraph spec # type: Any ): # type: (...) -> Any """ Determines whether the given requirement is satisfied by the given spec, in the context of the current activated dependency graph. """ return True def name_for(self, dependency): # type: (Any) -> str """ Returns the name for the given dependency. """ return str(dependency) def sort_dependencies(self, dependencies, # type: List[Any] activated, # type: DependencyGraph conflicts # type: Dict[str, List[Conflict]] ): # type: (...) -> List[Any] """ Sort dependencies so that the ones that are easiest to resolve are first. Easiest to resolve is (usually) defined by: 1) Is this dependency already activated? 2) How relaxed are the requirements? 3) Are there any conflicts for this dependency? 4) How many possibilities are there to satisfy this dependency? """ return sorted( dependencies, key=lambda dep: ( activated.vertex_named(self.name_for(dep)).payload is None, conflicts.get(self.name_for(dep)) is None ) ) def allow_missing(self, dependency): # type: (Any) -> bool """ Returns whether this dependency, which has no possible matching specifications, can safely be ignored. """ return False PK!}poetry/mixology/contracts/ui.pyimport sys class UI(object): def __init__(self, debug=False): self._debug = debug @property def output(self): return sys.stdout @property def progress_rate(self): # type: () -> float return 0.33 def is_debugging(self): # type: () -> bool return self._debug def indicate_progress(self): # type: () -> None self.output.write('.') def before_resolution(self): # type: () -> None self.output.write('Resolving dependencies...\n') def after_resolution(self): # type: () -> None self.output.write('') def debug(self, message, depth): # type: (...)
-> None if self.is_debugging(): debug_info = str(message) debug_info = '\n'.join([ ':{}: {}'.format(str(depth).rjust(4), s) for s in debug_info.split('\n') ]) + '\n' self.output.write(debug_info) PK!%7  #poetry/mixology/dependency_graph.pyfrom .exceptions import CircularDependencyError from .graph.log import Log class DependencyGraph: def __init__(self): self._vertices = {} self._log = Log() @property def vertices(self): return self._vertices @property def log(self): return self._log def tag(self, tag): return self._log.tag(self, tag) def rewind_to(self, tag): return self._log.rewind_to(self, tag) def add_child_vertex(self, name, payload, parent_names, requirement): root = True try: parent_names.index(None) except ValueError: root = False parent_names = [n for n in parent_names if n is not None] vertex = self.add_vertex(name, payload, root) if root: vertex.explicit_requirements.append(requirement) for parent_name in parent_names: parent_vertex = self.vertex_named(parent_name) self.add_edge(parent_vertex, vertex, requirement) return vertex def add_vertex(self, name, payload, root=False): return self._log.add_vertex(self, name, payload, root) def detach_vertex_named(self, name): return self._log.detach_vertex_named(self, name) def vertex_named(self, name): return self.vertices.get(name) def root_vertex_named(self, name): vertex = self.vertex_named(name) if vertex and vertex.root: return vertex def add_edge(self, origin, destination, requirement): if destination.has_path_to(origin): raise CircularDependencyError([origin, destination]) return self.add_edge_no_circular(origin, destination, requirement) def add_edge_no_circular(self, origin, destination, requirement): self._log.add_edge_no_circular( self, origin.name, destination.name, requirement ) def delete_edge(self, edge): return self._log.delete_edge( self, edge.origin.name, edge.destination.name, edge.requirement ) def set_payload(self, name, payload): return self._log.set_payload(self, name, payload) def to_dot(self): dot_vertices = [] dot_edges = [] for n, v in self.vertices.items(): dot_vertices.append( ' {} [label="{}|{}"]'.format(n, n, v.payload or '') ) for e in v.outgoing_edges: label = e.requirement dot_edges.append( ' {} -> {} [label="{}"]'.format( e.origin.name, e.destination.name, label ) ) dot_vertices = sorted(set(dot_vertices)) dot_edges = sorted(set(dot_edges)) dot_vertices.insert(0, 'digraph G {') dot_vertices.append('') dot_edges.append('}') dot = dot_vertices + dot_edges return '\n'.join(dot) def __iter__(self): return iter(self.vertices.values()) PK! 
>poetry/mixology/exceptions.pyfrom .helpers import flat_map class ResolverError(Exception): pass class NoSuchDependencyError(ResolverError): def __init__(self, dependency, required_by=None): if required_by is None: required_by = [] sources = ' and '.join(['"{}"'.format(r) for r in required_by]) message = 'Unable to find a specification for "{}"'.format(dependency) if sources: message += ' depended upon by {}'.format(sources) super(NoSuchDependencyError, self).__init__(message) class CircularDependencyError(ResolverError): def __init__(self, vertices): super(CircularDependencyError, self).__init__( 'There is a circular dependency between {}'.format( ' and '.join([v.name for v in vertices]) ) ) self._dependencies = [v.payload.possibilities[-1] for v in vertices] @property def dependencies(self): return self._dependencies class VersionConflict(ResolverError): def __init__(self, conflicts, specification_provider): pairs = [] for conflicting in flat_map( list(conflicts.values()), lambda x: x.requirements ): for source, conflict_requirements in conflicting.items(): for c in conflict_requirements: pairs.append((c, source)) super(VersionConflict, self).__init__( 'Unable to satisfy the following requirements:\n\n' '{}'.format( '\n'.join('- "{}" required by "{}"'.format(r, d) for r, d in pairs) ) ) self._conflicts = conflicts self._specification_provider = specification_provider @property def conflicts(self): return self._conflicts @property def specification_provider(self): return self._specification_provider def message_with_trees(self, solver_name='Poetry', possibility_type='possibility named', reduce_trees=lambda trees: sorted(set(trees), key=str), printable_requirement=str, message_for_conflict=None, version_for_spec=str): o = [] for name, conflict in sorted(self._conflicts.items()): o.append( '\n{} could not find compatible versions for {} "{}"\n'.format( solver_name, possibility_type, name ) ) if conflict.locked_requirement: o.append( ' In snapshot ({}):\n'.format( self._specification_provider.name_for_locking_dependency_source ) ) o.append( ' {}\n'.format( printable_requirement(conflict.locked_requirement) ) ) o.append('\n') o.append( ' In {}:\n'.format( self._specification_provider.name_for_explicit_dependency_source ) ) trees = reduce_trees(conflict.requirement_trees) ot = [] for tree in trees: t = '' depth = 2 for req in tree: t += ' ' * depth + str(req) if tree[-1] != req: spec = conflict.activated_by_name.get( self._specification_provider.name_for(req) ) if spec: t += ' was resolved to {}, which'.format( version_for_spec(spec) ) t += ' depends on' t += '\n' depth += 1 ot.append(t) o.append('\n'.join(ot)) if message_for_conflict: message_for_conflict(o, name, conflict) return ''.join(o).strip() PK!!poetry/mixology/graph/__init__.pyPK!"R22poetry/mixology/graph/action.pyfrom typing import Any class Action(object): def __init__(self): self.previous = None self.next = None @property def action_name(self): # type: () -> str raise NotImplementedError() def up(self, graph): # type: (DependencyGraph) -> Any """ Performs the action on the given graph. """ raise NotImplementedError() def down(self, graph): # type: (DependencyGraph) -> None """ Reverses the action on the given graph.
""" raise NotImplementedError() PK!Owzz-poetry/mixology/graph/add_edge_no_circular.pyfrom .action import Action from .edge import Edge class AddEdgeNoCircular(Action): def __init__(self, origin, destination, requirement): super(AddEdgeNoCircular, self).__init__() self._origin = origin self._destination = destination self._requirement = requirement @property def action_name(self): return 'add_edge_no_circular' @property def origin(self): return self._origin @property def destination(self): return self._destination @property def requirement(self): return self._requirement def up(self, graph): edge = self.make_edge(graph) edge.origin.outgoing_edges.append(edge) edge.destination.incoming_edges.append(edge) return edge def down(self, graph): edge = self.make_edge(graph) self._delete_first(edge.origin.outgoing_edges, edge) self._delete_first(edge.destination.incoming_edges, edge) def make_edge(self, graph): return Edge( graph.vertex_named(self._origin), graph.vertex_named(self._destination), self._requirement ) def _delete_first(self, elements, element): """ :type elements: list """ try: index = elements.index(element) except ValueError: return del elements[index] PK!1II#poetry/mixology/graph/add_vertex.pyfrom .action import Action from .vertex import Vertex _NULL = object() class AddVertex(Action): def __init__(self, name, payload, root): super(AddVertex, self).__init__() self._name = name self._payload = payload self._root = root self._existing_payload = _NULL self._existing_root = None @property def action_name(self): return 'add_vertex' @property def name(self): return self._name @property def payload(self): return self._payload @property def root(self): return self._root def up(self, graph): existing = graph.vertices.get(self._name) if existing: self._existing_payload = existing.payload self._existing_root = existing.root vertex = existing or Vertex(self._name, self._payload) graph.vertices[vertex.name] = vertex if not vertex.payload: vertex.payload = self.payload if not vertex.root: vertex.root = self.root return vertex def down(self, graph): if self._existing_payload is not _NULL: vertex = graph.vertices[self._name] vertex.payload = self._existing_payload vertex.root = self._existing_root else: del graph.vertices[self._name] PK!j^^$poetry/mixology/graph/delete_edge.pyfrom .action import Action from .edge import Edge class DeleteEdge(Action): def __init__(self, origin, destination, requirement): super(DeleteEdge, self).__init__() self._origin = origin self._destination = destination self._requirement = requirement @property def action_name(self): return 'delete_edge' @property def origin(self): return self._origin @property def destination(self): return self._destination @property def requirement(self): return self._requirement def up(self, graph): edge = self.make_edge(graph) self._delete_first(edge.origin.outgoing_edges, edge) self._delete_first(edge.destination.incoming_edges, edge) return edge def down(self, graph): edge = self.make_edge(graph) edge.origin.outgoing_edges.append(edge) edge.origin.incoming_edges.append(edge) def make_edge(self, graph): return Edge( graph.vertex_named(self._origin), graph.vertex_named(self._destination), self._requirement ) def _delete_first(self, elements, element): """ :type elements: list """ try: index = elements.index(element) except ValueError: return del elements[index] PK!͍ ,poetry/mixology/graph/detach_vertex_named.pyfrom .action import Action class DetachVertexNamed(Action): def __init__(self, name): super(DetachVertexNamed, self).__init__() 
self._name = name self._vertex = None @property def action_name(self): return 'detach_vertex' @property def name(self): return self._name def up(self, graph): if self._name not in graph.vertices: return [] self._vertex = graph.vertices[self._name] del graph.vertices[self._name] removed_vertices = [self._vertex] for e in self._vertex.outgoing_edges: v = e.destination try: v.incoming_edges.remove(e) except ValueError: pass if not v.root and not v.incoming_edges: removed_vertices += graph.detach_vertex_named(v.name) for e in self._vertex.incoming_edges: v = e.origin try: v.outgoing_edges.remove(e) except ValueError: pass return removed_vertices def down(self, graph): if self._vertex is None: return graph.vertices[self._vertex.name] = self._vertex for e in self._vertex.outgoing_edges: e.destination.incoming_edges.append(e) for e in self._vertex.incoming_edges: e.origin.outgoing_edges.append(e) PK!Npoetry/mixology/graph/edge.pyclass Edge: """ A directed edge of a DependencyGraph """ def __init__(self, origin, destination, requirement): self._origin = origin self._destination = destination self._requirement = requirement @property def origin(self): return self._origin @property def destination(self): return self._destination @property def requirement(self): return self._requirement def __eq__(self, other): return self._origin == other.origin and self._destination == other.destination def __repr__(self): return '<Edge {} -> {}>'.format( self._origin.name, self._destination.name ) PK!T0 0 poetry/mixology/graph/log.pyfrom .add_edge_no_circular import AddEdgeNoCircular from .add_vertex import AddVertex from .delete_edge import DeleteEdge from .detach_vertex_named import DetachVertexNamed from .set_payload import SetPayload from .tag import Tag class Log: """ A log for dependency graph actions. """ def __init__(self): self._current_action = None self._first_action = None def tag(self, graph, tag): """ Tags the current state of the dependency graph with the given tag.
""" return self._push_action(graph, Tag(tag)) def add_vertex(self, graph, name, payload, root): return self._push_action(graph, AddVertex(name, payload, root)) def detach_vertex_named(self, graph, name): return self._push_action(graph, DetachVertexNamed(name)) def add_edge_no_circular(self, graph, origin, destination, requirement): action = AddEdgeNoCircular(origin, destination, requirement) return self._push_action(graph, action) def delete_edge(self, graph, origin, destination, requirement): action = DeleteEdge(origin, destination, requirement) return self._push_action(graph, action) def set_payload(self, graph, name, payload): return self._push_action(graph, SetPayload(name, payload)) def pop(self, graph): action = self._current_action if not action: return self._current_action = action.previous if not self._current_action: self._first_action = None action.down(graph) return action def rewind_to(self, graph, tag): while True: action = self.pop(graph) if not action: raise ValueError('No tag "{}" found'.format(tag)) if isinstance(action, Tag) and action.tag == tag: break def _push_action(self, graph, action): """ Adds the given action to the log, running the action :param graph: The graph :param action: The action :type action: Action """ action.previous = self._current_action if self._current_action: self._current_action.next = action self._current_action = action if not self._first_action: self._first_action = action return action.up(graph) PK!$poetry/mixology/graph/set_payload.pyfrom .action import Action class SetPayload(Action): def __init__(self, name, payload): super(SetPayload, self).__init__() self._name = name self._payload = payload self._old_payload = None @property def action_name(self): return 'set_payload' @property def name(self): return self._name @property def payload(self): return self._payload def up(self, graph): vertex = graph.vertex_named(self._name) self._old_payload = vertex.payload vertex.payload = self._payload def down(self, graph): graph.vertex_named(self._name).payload = self._old_payload PK!lZ9UUpoetry/mixology/graph/tag.pyfrom .action import Action class Tag(Action): def __init__(self, tag): super(Tag, self).__init__() self._tag = tag @property def action_name(self): return 'tag' @property def tag(self): return self._tag def up(self, graph): pass def down(self, graph): pass PK!ڀ_ _ poetry/mixology/graph/vertex.pyfrom ..utils import unique class Vertex: def __init__(self, name, payload): self.name = name self.payload = payload self.root = False self._explicit_requirements = [] self.outgoing_edges = [] self.incoming_edges = [] @property def explicit_requirements(self): return self._explicit_requirements @property def requirements(self): return unique([ edge.requirement for edge in self.incoming_edges ] + self._explicit_requirements) @property def predecessors(self): return [edge.origin for edge in self.incoming_edges] @property def recursive_predecessors(self): return self._recursive_predecessors() def _recursive_predecessors(self, vertices=None): if vertices is None: vertices = set() for edge in self.incoming_edges: vertex = edge.origin if vertex in vertices: continue vertices.add(vertex) vertex._recursive_predecessors(vertices) return vertices @property def successors(self): return [ edge.destination for edge in self.outgoing_edges ] @property def recursive_successors(self): return self._recursive_successors() def _recursive_successors(self, vertices=None): if vertices is None: vertices = set() for edge in self.outgoing_edges: vertex = edge.destination if 
vertex in vertices: continue vertices.add(vertex) vertex._recursive_successors(vertices) return vertices def __eq__(self, other): if not isinstance(other, Vertex): return NotImplemented if self is other: return True return ( self.name == other.name and self.payload == other.payload and set(self.successors) == set(other.successors) ) def __hash__(self): return hash(self.name) def has_path_to(self, other): return ( self == other or any([v.has_path_to(other) for v in self.successors]) ) def is_ancestor(self, other): return other.has_path_to(self) def __repr__(self): return '<Vertex {} ({})>'.format(self.name, self.payload) PK!C_Npoetry/mixology/helpers.pydef flat_map(iter, callable): if not isinstance(iter, (list, tuple)): yield callable(iter) else: for v in iter: for i in flat_map(v, callable): yield i PK!fSh"poetry/mixology/possibility_set.pyclass PossibilitySet: def __init__(self, dependencies, possibilities): self.dependencies = dependencies self.possibilities = possibilities @property def latest_version(self): if self.possibilities: return self.possibilities[-1] def __str__(self): return '[{}]'.format(', '.join([str(p) for p in self.possibilities])) def __repr__(self): return '<PossibilitySet {}>'.format(str(self)) PK!spoetry/mixology/resolution.py# -*- coding: utf-8 -*- import logging from copy import copy from datetime import datetime from typing import Any from typing import List from .contracts import SpecificationProvider from .contracts import UI from .exceptions import CircularDependencyError from .exceptions import VersionConflict from .conflict import Conflict from .dependency_graph import DependencyGraph from .helpers import flat_map from .possibility_set import PossibilitySet from .state import DependencyState from .unwind_details import UnwindDetails from .utils import unique logger = logging.getLogger(__name__) class Resolution: def __init__(self, provider, # type: SpecificationProvider ui, # type: UI requested, # type: List[Any] base # type: DependencyGraph ): self._provider = provider self._ui = ui self._requested = requested self._original_requested = copy(requested) self._base = base self._states = [] self._iteration_counter = 0 self._progress_rate = 0.33 self._iteration_rate = None self._parents_of = {} self._started_at = None @property def provider(self): # type: () -> SpecificationProvider return self._provider @property def ui(self): # type: () -> UI return self._ui @property def requested(self): # type: () -> List[Any] return self._requested @property def base(self): # type: () -> DependencyGraph return self._base @property def activated(self): # type: () -> DependencyGraph return self.state.activated def resolve(self): # type: () -> DependencyGraph """ Resolve the original requested dependencies into a full dependency graph. """ self._start() try: while self.state: if not self.state.requirement and not self.state.requirements: break self._indicate_progress() if hasattr(self.state, 'pop_possibility_state'): self._debug( 'Creating possibility state for {} ({} remaining)' .format( str(self.state.requirement), len(self.state.possibilities) ) ) s = self.state.pop_possibility_state() if s: self._states.append(s) self.activated.tag(s) self._process_topmost_state() return self._resolve_activated_specs() finally: self._end() def _start(self): # type: () -> None """ Set up the resolution process.
""" self._started_at = datetime.now() self._debug( 'Starting resolution ({})\nRequested dependencies: {}'.format( self._started_at, [str(d) for d in self._original_requested] ) ) self._ui.before_resolution() self._handle_missing_or_push_dependency_state(self._initial_state()) def _resolve_activated_specs(self): # type: () -> DependencyGraph for vertex in self.activated.vertices.values(): if not vertex.payload: continue latest_version = None for possibility in reversed(list(vertex.payload.possibilities)): if all( [ self._provider.is_requirement_satisfied_by( req, self.activated, possibility ) for req in vertex.requirements ] ): latest_version = possibility break self.activated.set_payload(vertex.name, latest_version) return self.activated def _end(self): # type: () -> None """ Ends the resolution process """ elapsed = (datetime.now() - self._started_at).total_seconds() self._ui.after_resolution() self._debug( 'Finished resolution ({} steps) ' 'in {:.3f} seconds'.format( self._iteration_counter, elapsed ) ) def _process_topmost_state(self): # type: () -> None """ Processes the topmost available RequirementState on the stack. """ try: if self.possibility: self._attempt_to_activate() else: self._create_conflict() self._unwind_for_conflict() except CircularDependencyError as e: self._create_conflict(e) self._unwind_for_conflict() @property def possibility(self): # type: () -> PossibilitySet """ The current possibility that the resolution is trying. """ if self.state.possibilities: return self.state.possibilities[-1] @property def state(self): # type: () -> DependencyState """ The current state the resolution is operating upon. """ if self._states: return self._states[-1] @property def name(self): # type: () -> str return self.state.name @property def requirement(self): # type: () -> Any return self.state.requirement def _initial_state(self): # type: () -> DependencyState """ Create the initial state for the resolution, based upon the requested dependencies. 
""" graph = DependencyGraph() for requested in self._original_requested: vertex = graph.add_vertex( self._provider.name_for(requested), None, True ) vertex.explicit_requirements.append(requested) graph.tag('initial_state') requirements = self._provider.sort_dependencies( self._original_requested, graph, {} ) initial_requirement = None if requirements: initial_requirement = requirements.pop(0) name = None if initial_requirement: name = self._provider.name_for(initial_requirement) return DependencyState( name, requirements, graph, initial_requirement, self._possibilities_for_requirement(initial_requirement, graph), 0, {}, [] ) def _unwind_for_conflict(self): # type: () -> None """ Unwinds the states stack because a conflict has been encountered """ details_for_unwind = self._build_details_for_unwind() unwind_options = self.state.unused_unwind_options self._debug( 'Unwinding for conflict: ' '{} to {}'.format( str(self.state.requirement), details_for_unwind.state_index // 2 ), self.state.depth ) conflicts = self.state.conflicts sliced_states = self._states[details_for_unwind.state_index + 1:] self._states = self._states[:details_for_unwind.state_index + 1] self._raise_error_unless_state(conflicts) if sliced_states: self.activated.rewind_to( sliced_states[0] or 'initial_state' ) self.state.conflicts = conflicts self.state.unused_unwind_options = unwind_options self._filter_possibilities_after_unwind(details_for_unwind) index = len(self._states) - 1 for k, l in self._parents_of.items(): self._parents_of[k] = [x for x in l if x < index] self.state.unused_unwind_options = [ uw for uw in self.state.unused_unwind_options if uw.state_index < index ] def _raise_error_unless_state(self, conflicts): # type: (dict) -> None """ Raise a VersionConflict error, or any underlying error, if there is no current state """ if self.state: return errors = [c.underlying_error for c in conflicts.values() if c.underlying_error is not None] if errors: error = errors[0] else: error = VersionConflict(conflicts, self._provider) raise error def _build_details_for_unwind(self): # type: () -> UnwindDetails """ Return the details of the nearest index to which we could unwind. """ # Get the possible unwinds for the current conflict current_conflict = self.state.conflicts[self.state.name] binding_requirements = self._binding_requirements_for_conflict( current_conflict ) unwind_details = self._unwind_options_for_requirements( binding_requirements ) last_detail_for_current_unwind = sorted(unwind_details)[-1] current_detail = last_detail_for_current_unwind # Look for past conflicts that could be unwound to affect the # requirement tree for the current conflict relevant_unused_unwinds = [] for alternative in self.state.unused_unwind_options: intersecting_requirements = ( set(last_detail_for_current_unwind.all_requirements) & set(alternative.requirements_unwound_to_instead) ) if not intersecting_requirements: continue # Find the highest index unwind whilst looping through if alternative > current_detail: current_detail = alternative relevant_unused_unwinds.append(alternative) # Add the current unwind options to the `unused_unwind_options` array. # The "used" option will be filtered out during `unwind_for_conflict`. 
self.state.unused_unwind_options += [ detail for detail in unwind_details if detail.state_index != -1 ] # Update the requirements_unwound # to_instead on any relevant unused unwinds for d in relevant_unused_unwinds: d.requirements_unwound_to_instead.append( current_detail.state_requirement ) for d in unwind_details: d.requirements_unwound_to_instead.append( current_detail.state_requirement ) return current_detail def _unwind_options_for_requirements(self, binding_requirements ): # type: (list) -> List[UnwindDetails] unwind_details = [] trees = [] for r in reversed(binding_requirements): partial_tree = [r] trees.append(partial_tree) unwind_details.append( UnwindDetails( -1, None, partial_tree, binding_requirements, trees, [] ) ) # If this requirement has alternative possibilities, # check if any would satisfy the other requirements # that created this conflict requirement_state = self._find_state_for(r) if self._conflict_fixing_possibilities(requirement_state, binding_requirements): unwind_details.append( UnwindDetails( self._states.index(requirement_state), r, partial_tree, binding_requirements, trees, [] ) ) # Next, look at the parent of this requirement, # and check if the requirement could have been avoided # if an alternative PossibilitySet had been chosen parent_r = self._parent_of(r) if parent_r is None: continue partial_tree.insert(0, parent_r) requirement_state = self._find_state_for(parent_r) possibilities = [ r.name in map(lambda x: x.name, set_.dependencies) for set_ in requirement_state.possibilities ] if any(possibilities): unwind_details.append( UnwindDetails( self._states.index(requirement_state), parent_r, partial_tree, binding_requirements, trees, [] ) ) # Finally, look at the grandparent and up of this requirement, # looking for any possibilities that wouldn't # create their parent requirement grandparent_r = self._parent_of(parent_r) while grandparent_r is not None: partial_tree.insert(0, grandparent_r) requirement_state = self._find_state_for(grandparent_r) possibilities = [ parent_r.name in map(lambda x: x.name, set_.dependencies) for set_ in requirement_state.possibilities ] if any(possibilities): unwind_details.append( UnwindDetails( self._states.index(requirement_state), grandparent_r, partial_tree, binding_requirements, trees, [] ) ) parent_r = grandparent_r grandparent_r = self._parent_of(parent_r) return unwind_details def _conflict_fixing_possibilities(self, state, binding_requirements): """ Return whether or not the given state has any possibilities that could satisfy the given requirements :rtype: bool """ if not state: return False return any([ any([ self._possibility_satisfies_requirements( poss, binding_requirements ) ]) for possibility_set in state.possibilities for poss in possibility_set.possibilities ]) def _filter_possibilities_after_unwind(self, unwind_details): """ Filter a state's possibilities to remove any that would not fix the conflict we've just rewound from :type unwind_details: UnwindDetails """ if not self.state or not self.state.possibilities: return if unwind_details.unwinding_to_primary_requirement(): self._filter_possibilities_for_primary_unwind(unwind_details) else: self._filter_possibilities_for_parent_unwind(unwind_details) def _filter_possibilities_for_primary_unwind(self, unwind_details): """ Filter a state's possibilities to remove any that would not satisfy the requirements in the conflict we've just rewound from. 
:type unwind_details: UnwindDetails """ unwinds_to_state = [ uw for uw in self.state.unused_unwind_options if uw.state_index == unwind_details.state_index ] unwinds_to_state.append(unwind_details) unwind_requirement_sets = [ uw.conflicting_requirements for uw in unwinds_to_state ] possibilities = [] for possibility_set in self.state.possibilities: if not any([ any([ self._possibility_satisfies_requirements( poss, requirements ) ]) for poss in possibility_set.possibilities for requirements in unwind_requirement_sets ]): continue possibilities.append(possibility_set) self.state.possibilities = possibilities def _possibility_satisfies_requirements(self, possibility, requirements): name = self._provider.name_for(possibility) self.activated.tag('swap') if self.activated.vertex_named(name): self.activated.set_payload(name, possibility) satisfied = all([ self._provider.is_requirement_satisfied_by( r, self.activated, possibility ) for r in requirements ]) self.activated.rewind_to('swap') return satisfied def _filter_possibilities_for_parent_unwind(self, unwind_details # type: UnwindDetails ): """ Filter a state's possibilities to remove any that would (eventually) the requirements in the conflict we've just rewound from. """ unwinds_to_state = [ uw for uw in self.state.unused_unwind_options if uw.state_index == unwind_details.state_index ] unwinds_to_state.append(unwind_details) primary_unwinds = unique([ uw for uw in unwinds_to_state if uw.unwinding_to_primary_requirement() ]) parent_unwinds = unique(unwinds_to_state) parent_unwinds = [uw for uw in parent_unwinds if uw not in primary_unwinds] allowed_possibility_sets = [] for unwind in primary_unwinds: for possibility_set in self._states[unwind.state_index].possibilities: if any([ self._possibility_satisfies_requirements( poss, unwind.conflicting_requirements ) for poss in possibility_set.possibilities ]): allowed_possibility_sets.append(possibility_set) requirements_to_avoid = list(flat_map( parent_unwinds, lambda x: x.sub_dependencies_to_avoid )) possibilities = [] for possibility_set in self.state.possibilities: if ( possibility_set in allowed_possibility_sets or [ r for r in requirements_to_avoid if r not in possibility_set.dependencies ] ): possibilities.append(possibility_set) self.state.possibilities = possibilities def _binding_requirements_for_conflict(self, conflict): """ Return the minimal list of requirements that would cause the passed conflict to occur. :rtype: list """ if conflict.possibility is None: return [conflict.requirement] possible_binding_requirements_set = list(conflict.requirements.values()) possible_binding_requirements = [] for reqs in possible_binding_requirements_set: if isinstance(reqs, list): possible_binding_requirements += reqs else: possible_binding_requirements.append(reqs) possible_binding_requirements = unique(possible_binding_requirements) # When there’s a `CircularDependency` error the conflicting requirement # (the one causing the circular) won’t be `conflict.requirement` # (which won’t be for the right state, because we won’t have created it, # because it’s circular). # We need to make sure we have that requirement in the conflict’s list, # otherwise we won’t be able to unwind properly, so we just return all # the requirements for the conflict. 
if conflict.underlying_error: return possible_binding_requirements possibilities = self._provider.search_for(conflict.requirement) # If all the requirements together don't filter out all possibilities, # then the only two requirements we need to consider are the initial one # (where the dependency's version was first chosen) and the last if self._binding_requirement_in_set( None, possible_binding_requirements, possibilities ): return list(filter(None, [ conflict.requirement, self._requirement_for_existing_name( self._provider.name_for(conflict.requirement) ) ])) # Loop through the possible binding requirements, removing each one # that doesn't bind. Use a reversed as we want the earliest set of # binding requirements. binding_requirements = copy(possible_binding_requirements) for req in reversed(possible_binding_requirements): if req == conflict.requirement: continue if not self._binding_requirement_in_set( req, binding_requirements, possibilities ): index = binding_requirements.index(req) del binding_requirements[index] return binding_requirements def _binding_requirement_in_set(self, requirement, possible_binding_requirements, possibilities): # type: () -> bool """ Return whether or not the given requirement is required to filter out all elements of the list of possibilities. """ return any([ self._possibility_satisfies_requirements( poss, set(possible_binding_requirements) - set([requirement]) ) for poss in possibilities ]) def _parent_of(self, requirement): if not requirement: return if requirement not in self._parents_of: self._parents_of[requirement] = [] if not self._parents_of[requirement]: return try: index = self._parents_of[requirement][-1] except ValueError: return try: parent_state = self._states[index] except ValueError: return return parent_state.requirement def _requirement_for_existing_name(self, name): vertex = self.activated.vertex_named(name) if not vertex: return if not vertex.payload: return for s in self._states: if s.name == name: return s.requirement def _find_state_for(self, requirement): if not requirement: return for s in self._states: if s.requirement == requirement: return s def _create_conflict(self, underlying_error=None): vertex = self.activated.vertex_named(self.state.name) locked_requirement = self._locked_requirement_named(self.state.name) requirements = {} if vertex.explicit_requirements: requirements[self._provider.name_for_explicit_dependency_source] = vertex.explicit_requirements if locked_requirement: requirements[self._provider.name_for_locking_dependency_source] = [locked_requirement] for edge in vertex.incoming_edges: if edge.origin.payload.latest_version not in requirements: requirements[edge.origin.payload.latest_version] = [] requirements[edge.origin.payload.latest_version].insert(0, edge.requirement) activated_by_name = {} for v in self.activated: if v.payload: activated_by_name[v.name] = v.payload.latest_version conflict = Conflict( self.requirement, requirements, vertex.payload.latest_version if vertex.payload else None, self.possibility, locked_requirement, self.requirement_trees, activated_by_name, underlying_error ) self.state.conflicts[self.name] = conflict return conflict @property def requirement_trees(self): vertex = self.activated.vertex_named(self.state.name) return [self._requirement_tree_for(r) for r in vertex.requirements] def _requirement_tree_for(self, requirement): tree = [] while requirement: tree.insert(0, requirement) requirement = self._parent_of(requirement) return tree def _indicate_progress(self): 
self._iteration_counter += 1 progress_rate = self._ui.progress_rate or self._progress_rate if self._iteration_rate is None: if (datetime.now() - self._started_at).total_seconds() >= progress_rate: self._iteration_rate = self._iteration_counter if self._iteration_rate and (self._iteration_counter % self._iteration_rate) == 0: self._ui.indicate_progress() def _debug(self, message, depth=0): self._ui.debug(message, depth) def _attempt_to_activate(self): self._debug( 'Attempting to activate {}'.format(str(self.possibility)), self.state.depth, ) existing_vertex = self.activated.vertex_named(self.state.name) if existing_vertex.payload: self._debug( 'Found existing spec ({})'.format(existing_vertex.payload), self.state.depth ) self._attempt_to_filter_existing_spec(existing_vertex) else: latest = self.possibility.latest_version possibilities = [] for possibility in self.possibility.possibilities: if self._provider.is_requirement_satisfied_by( self.requirement, self.activated, possibility ): possibilities.append(possibility) self.possibility.possibilities = possibilities if self.possibility.latest_version is None: # ensure there's a possibility for better error messages if latest: self.possibility.possibilities.append(latest) self._create_conflict() self._unwind_for_conflict() else: self._activate_new_spec() def _attempt_to_filter_existing_spec(self, vertex): """ Attempt to update the existing vertex's `PossibilitySet` with a filtered version. """ filtered_set = self._filtered_possibility_set(vertex) if filtered_set.possibilities: self.activated.set_payload(self.name, filtered_set) new_requirements = copy(self.state.requirements) self._push_state_for_requirements(new_requirements, False) else: self._create_conflict() self._debug( 'Unsatisfied by existing spec ({})'.format(str(vertex.payload)), self.state.depth ) self._unwind_for_conflict() def _filtered_possibility_set(self, vertex): possibilities = [ p for p in vertex.payload.possibilities if p in self.possibility.possibilities ] return PossibilitySet( vertex.payload.dependencies, possibilities ) def _locked_requirement_named(self, requirement_name): vertex = self.base.vertex_named(requirement_name) if vertex: return vertex.payload def _activate_new_spec(self): if self.state.name in self.state.conflicts: del self.state.conflicts[self.name] self._debug( 'Activated {} at {}'.format(self.state.name, str(self.possibility)), self.state.depth ) self.activated.set_payload(self.state.name, self.possibility) self._require_nested_dependencies_for(self.possibility) def _require_nested_dependencies_for(self, possibility_set): nested_dependencies = self._provider.dependencies_for( possibility_set.latest_version ) self._debug( 'Requiring nested dependencies ' '({})'.format(', '.join([str(d) for d in nested_dependencies])), self.state.depth ) for d in nested_dependencies: self.activated.add_child_vertex( self._provider.name_for(d), None, [self._provider.name_for(possibility_set.latest_version)], d ) parent_index = len(self._states) - 1 if d not in self._parents_of: self._parents_of[d] = [] parents = self._parents_of[d] if not parents: parents.append(parent_index) self._push_state_for_requirements( self.state.requirements + nested_dependencies, len(nested_dependencies) > 0 ) def _push_state_for_requirements(self, new_requirements, requires_sort=True, new_activated=None): if new_activated is None: new_activated = self.activated if requires_sort: new_requirements = self._provider.sort_dependencies( unique(new_requirements), new_activated, self.state.conflicts ) 
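# Illustrative aside (not part of the original archive): unique() above is
# the order-preserving deduplication helper from poetry/mixology/utils.py.
# Unlike wrapping the requirements in set(), it keeps the first occurrence
# of each element in place, so sort_dependencies sees a stable input order:
#
#     from poetry.mixology.utils import unique
#     assert unique(['b', 'a', 'b', 'c', 'a']) == ['b', 'a', 'c']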
while True: new_requirement = None if new_requirements: new_requirement = new_requirements.pop(0) if ( new_requirement is None or not any([ s.requirement == new_requirement for s in self._states ]) ): break new_name = '' if new_requirement: new_name = self._provider.name_for(new_requirement) possibilities = self._possibilities_for_requirement(new_requirement) self._handle_missing_or_push_dependency_state( DependencyState( new_name, new_requirements, new_activated, new_requirement, possibilities, self.state.depth, copy(self.state.conflicts), copy(self.state.unused_unwind_options) ) ) def _possibilities_for_requirement(self, requirement, activated=None): if activated is None: activated = self.activated if not requirement: return [] if self._locked_requirement_named(self._provider.name_for(requirement)): return self._locked_requirement_possibility_set( requirement, activated ) return self._group_possibilities( self._provider.search_for(requirement) ) def _locked_requirement_possibility_set(self, requirement, activated=None): if activated is None: activated = self.activated all_possibilities = self._provider.search_for(requirement) locked_requirement = self._locked_requirement_named( self._provider.name_for(requirement) ) # Longwinded way to build a possibilities list with either the locked # requirement or nothing in it. Required, since the API for # locked_requirement isn't guaranteed. locked_possibilities = [ possibility for possibility in all_possibilities if self._provider.is_requirement_satisfied_by( locked_requirement, activated, possibility ) ] return self._group_possibilities(locked_possibilities) def _group_possibilities(self, possibilities): possibility_sets = [] current_possibility_set = None for possibility in reversed(possibilities): self._debug( 'Getting dependencies for {}'.format(possibility), depth=self.state.depth if self.state else 0 ) dependencies = self._provider.dependencies_for(possibility) if current_possibility_set and current_possibility_set.dependencies == dependencies: current_possibility_set.possibilities.insert(0, possibility) else: possibility_sets.insert( 0, PossibilitySet(dependencies, [possibility]) ) current_possibility_set = possibility_sets[0] return possibility_sets def _handle_missing_or_push_dependency_state(self, state): if ( state.requirement and not state.possibilities and self._provider.allow_missing(state.requirement) ): state.activated.detach_vertex_named(state.name) self._push_state_for_requirements( copy(state.requirements), False, state.activated ) else: self._states.append(state) state.activated.tag(state) PK!|`vvpoetry/mixology/resolver.pyfrom typing import Any from typing import List from typing import Union from .contracts import SpecificationProvider from .contracts import UI from .dependency_graph import DependencyGraph from .resolution import Resolution class Resolver: def __init__(self, specification_provider, # type: SpecificationProvider resolver_ui # type: UI ): self._specification_provider = specification_provider self._resolver_ui = resolver_ui @property def specification_provider(self): # type: () -> SpecificationProvider return self._specification_provider @property def ui(self): # type: () -> UI return self._resolver_ui def resolve(self, requested, # type: List[Any] base=None # type: Union[DependencyGraph, None] ): # type: (...) 
-> DependencyGraph if base is None: base = DependencyGraph() return Resolution( self._specification_provider, self._resolver_ui, requested, base ).resolve() PK!s4poetry/mixology/state.pyfrom copy import copy from .dependency_graph import DependencyGraph class ResolutionState: def __init__(self, name, requirements, activated, requirement, possibilities, depth, conflicts, unused_unwind_options): self._name = name self._requirements = requirements self._activated = activated self._requirement = requirement self.possibilities = possibilities self._depth = depth self.conflicts = conflicts self.unused_unwind_options = unused_unwind_options @property def name(self): return self._name @property def requirements(self): return self._requirements @property def activated(self): return self._activated @property def requirement(self): return self._requirement @property def depth(self): return self._depth @classmethod def empty(cls): return cls(None, [], DependencyGraph(), None, None, 0, {}, []) def __repr__(self): return '<{} {} ({})>'.format( self.__class__.__name__, self._name, str(self.requirement) ) class PossibilityState(ResolutionState): pass class DependencyState(ResolutionState): def pop_possibility_state(self): state = PossibilityState( self._name, copy(self._requirements), self._activated, self._requirement, [self.possibilities.pop() if self.possibilities else None], self._depth + 1, copy(self.conflicts), copy(self.unused_unwind_options) ) state.activated.tag(state) return state PK!: !poetry/mixology/unwind_details.pyclass UnwindDetails: def __init__(self, state_index, state_requirement, requirement_tree, conflicting_requirements, requirement_trees, requirements_unwound_to_instead): self.state_index = state_index self.state_requirement = state_requirement self.requirement_tree = requirement_tree self.conflicting_requirements = conflicting_requirements self.requirement_trees = requirement_trees self.requirements_unwound_to_instead = requirements_unwound_to_instead self._reversed_requirement_tree_index = None self._sub_dependencies_to_avoid = None self._all_requirements = None @property def reversed_requirement_tree_index(self): if self._reversed_requirement_tree_index is None: if self.state_requirement: self._reversed_requirement_tree_index = list(reversed( self.requirement_tree )).index(self.state_requirement) else: self._reversed_requirement_tree_index = 999999 return self._reversed_requirement_tree_index def unwinding_to_primary_requirement(self): return self.requirement_tree[-1] == self.state_requirement @property def sub_dependencies_to_avoid(self): if self._sub_dependencies_to_avoid is None: self._sub_dependencies_to_avoid = [] for tree in self.requirement_trees: try: index = tree.index(self.state_requirement) except ValueError: continue if tree[index + 1] is not None: self._sub_dependencies_to_avoid.append(tree[index + 1]) return self._sub_dependencies_to_avoid @property def all_requirements(self): if self._all_requirements is None: self._all_requirements = [ x for tree in self.requirement_trees for x in tree ] return self._all_requirements def __eq__(self, other): if not isinstance(other, UnwindDetails): return NotImplemented return ( self.state_index == other.state_index and ( self.reversed_requirement_tree_index == other.reversed_requirement_tree_index ) ) def __lt__(self, other): if not isinstance(other, UnwindDetails): return NotImplemented return self.state_index < other.state_index def __le__(self, other): if not isinstance(other, UnwindDetails): return NotImplemented return 
self.state_index <= other.state_index def __gt__(self, other): if not isinstance(other, UnwindDetails): return NotImplemented return self.state_index > other.state_index def __ge__(self, other): if not isinstance(other, UnwindDetails): return NotImplemented return self.state_index >= other.state_index def __hash__(self): return hash((id(self), self.state_index, self.state_requirement)) PK!gծffpoetry/mixology/utils.pydef unique(l): used = set() return [x for x in l if x not in used and (used.add(x) or True)] PK!Αpoetry/packages/__init__.pyimport os import re from poetry.version.requirements import Requirement from .dependency import Dependency from .file_dependency import FileDependency from .locker import Locker from .package import Package from .utils.link import Link from .utils.utils import convert_markers from .utils.utils import group_markers from .utils.utils import is_archive_file from .utils.utils import is_installable_dir from .utils.utils import is_url from .utils.utils import path_to_url from .utils.utils import strip_extras from .vcs_dependency import VCSDependency def dependency_from_pep_508(name): req = Requirement(name) if req.marker: markers = convert_markers(req.marker.markers) else: markers = {} name = req.name path = os.path.normpath(os.path.abspath(name)) link = None if is_url(name): link = Link(name) else: p, extras = strip_extras(path) if (os.path.isdir(p) and (os.path.sep in name or name.startswith('.'))): if not is_installable_dir(p): raise ValueError( "Directory {!r} is not installable. File 'setup.py' " "not found.".format(name) ) link = Link(path_to_url(p)) elif is_archive_file(p): link = Link(path_to_url(p)) # it's a local file, dir, or url if link: # Handle relative file URLs if link.scheme == 'file' and re.search(r'\.\./', link.url): link = Link( path_to_url(os.path.normpath(os.path.abspath(link.path))) ) # wheel file if link.is_wheel: m = re.match( '^(?P(?P.+?)-(?P\d.*?))', link.filename ) if not m: raise ValueError('Invalid wheel name: {}'.format(link.filename)) name = m.group('name') version = m.group('ver') dep = Dependency(name, version) else: name = link.egg_fragment if link.scheme == 'git': dep = VCSDependency(name, 'git', link.url_without_fragment) else: dep = Dependency(name, '*') else: if req.pretty_constraint: constraint = req.constraint else: constraint = '*' dep = Dependency(name, constraint) if 'extra' in markers: # If we have extras, the dependency is optional dep.deactivate() for or_ in markers['extra']: for _, extra in or_: dep.extras.append(extra) if 'python_version' in markers: ors = [] for or_ in markers['python_version']: ands = [] for op, version in or_: # Expand python version if op == '==': version = '~' + version op = '' elif op == '!=': version += '.*' elif op == 'in': versions = [] for v in version.split(' '): split = v.split('.') if len(split) in [1, 2]: split.append('*') op = '' else: op = '==' versions.append(op + '.'.join(split)) if versions: ands.append(' || '.join(versions)) continue ands.append('{}{}'.format(op, version)) ors.append(' '.join(ands)) dep.python_versions = ' || '.join(ors) if 'sys_platform' in markers: ors = [] for or_ in markers['sys_platform']: ands = [] for op, platform in or_: if op == '==': op = '' elif op == 'in': platforms = [] for v in platform.split(' '): platforms.append(v) if platforms: ands.append(' || '.join(platforms)) continue ands.append('{}{}'.format(op, platform)) ors.append(' '.join(ands)) dep.platform = ' || '.join(ors) return dep PK!'poetry/packages/constraints/__init__.pyPK! 
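# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original archive): roughly what
# dependency_from_pep_508 above yields for a plain requirement and for one
# carrying a python_version marker. The requirement strings are invented,
# and the exact constraint normalisation depends on poetry.semver, so treat
# the commented results as indicative rather than authoritative.
#
# from poetry.packages import dependency_from_pep_508
#
# dep = dependency_from_pep_508('requests (>=2.18.0)')
# dep.name             # 'requests'
#
# dep = dependency_from_pep_508('pathlib2 (>=2.3.0); python_version < "3.4"')
# dep.python_versions  # '<3.4', narrowed from the marker
# ---------------------------------------------------------------------------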
)==1poetry/packages/constraints/generic_constraint.pyimport operator import re from poetry.semver.constraints import EmptyConstraint from poetry.semver.constraints import MultiConstraint from poetry.semver.constraints.base_constraint import BaseConstraint class GenericConstraint(BaseConstraint): """ Represents a generic constraint. This is particularly useful for platform/system/os/extra constraints. """ OP_EQ = operator.eq OP_NE = operator.ne _trans_op_str = { '=': OP_EQ, '==': OP_EQ, '!=': OP_NE } _trans_op_int = { OP_EQ: '==', OP_NE: '!=' } def __init__(self, operator, version): if operator not in self._trans_op_str: raise ValueError( 'Invalid operator "{}" given, ' 'expected one of: {}' .format( operator, ', '.join(self.supported_operators) ) ) self._operator = self._trans_op_str[operator] self._string_operator = self._trans_op_int[self._operator] self._version = version @property def supported_operators(self): return list(self._trans_op_str.keys()) @property def operator(self): return self._operator @property def string_operator(self): return self._string_operator @property def version(self): return self._version def matches(self, provider): if not isinstance(provider, (GenericConstraint, EmptyConstraint)): raise ValueError( 'Generic constraints can only be compared with each other' ) if isinstance(provider, EmptyConstraint): return True is_equal_op = self.OP_EQ is self._operator is_non_equal_op = self.OP_NE is self._operator is_provider_equal_op = self.OP_EQ is provider.operator is_provider_non_equal_op = self.OP_NE is provider.operator if ( is_equal_op and is_provider_equal_op or is_non_equal_op and is_provider_non_equal_op ): return self._version == provider.version if ( is_equal_op and is_provider_non_equal_op or is_non_equal_op and is_provider_equal_op ): return self._version != provider.version return False @classmethod def parse(cls, constraints): """ Parses a constraint string into MultiConstraint and/or PlatformConstraint objects. """ pretty_constraint = constraints or_constraints = re.split('\s*\|\|?\s*', constraints.strip()) or_groups = [] for constraints in or_constraints: and_constraints = re.split( '(? 
1: constraint_objects = [] for constraint in and_constraints: for parsed_constraint in cls._parse_constraint(constraint): constraint_objects.append(parsed_constraint) else: constraint_objects = cls._parse_constraint(and_constraints[0]) if len(constraint_objects) == 1: constraint = constraint_objects[0] else: constraint = MultiConstraint(constraint_objects) or_groups.append(constraint) if len(or_groups) == 1: constraint = or_groups[0] else: constraint = MultiConstraint(or_groups, False) constraint.pretty_string = pretty_constraint return constraint @classmethod def _parse_constraint(cls, constraint): m = re.match('(?i)^v?[xX*](\.[xX*])*$', constraint) if m: return EmptyConstraint(), # Basic Comparators m = re.match('^(!=|==?)?\s*(.*)', constraint) if m: return GenericConstraint(m.group(1) or '=', m.group(2)), raise ValueError( 'Could not parse generic constraint: {}'.format(constraint) ) def __str__(self): op = self._trans_op_int[self._operator] if op == '==': op = '' else: op = op + ' ' return '{}{}'.format( op, self._version ) def __repr__(self): return ''.format(str(self)) PK!\iBq==poetry/packages/dependency.pyimport poetry.packages from poetry.semver.constraints import Constraint from poetry.semver.constraints import EmptyConstraint from poetry.semver.constraints import MultiConstraint from poetry.semver.constraints.base_constraint import BaseConstraint from poetry.semver.version_parser import VersionParser from .constraints.generic_constraint import GenericConstraint class Dependency(object): def __init__(self, name, # type: str constraint, # type: str optional=False, # type: bool category='main', # type: str allows_prereleases=False # type: bool ): self._name = name.lower() self._pretty_name = name self._parser = VersionParser() try: if not isinstance(constraint, BaseConstraint): self._constraint = self._parser.parse_constraints(constraint) else: self._constraint = constraint except ValueError: self._constraint = self._parser.parse_constraints('*') self._pretty_constraint = constraint self._optional = optional self._category = category self._allows_prereleases = allows_prereleases self._python_versions = '*' self._python_constraint = self._parser.parse_constraints('*') self._platform = '*' self._platform_constraint = EmptyConstraint() self._extras = [] self._in_extras = [] @property def name(self): return self._name @property def constraint(self): return self._constraint @property def pretty_constraint(self): return self._pretty_constraint @property def pretty_name(self): return self._pretty_name @property def category(self): return self._category @property def python_versions(self): return self._python_versions @python_versions.setter def python_versions(self, value): self._python_versions = value self._python_constraint = self._parser.parse_constraints(value) @property def python_constraint(self): return self._python_constraint @property def platform(self): return self._platform @platform.setter def platform(self, value): self._platform = value self._platform_constraint = GenericConstraint.parse(value) @property def platform_constraint(self): return self._platform_constraint @property def extras(self): # type: () -> list return self._extras @property def in_extras(self): # type: () -> list return self._in_extras def allows_prereleases(self): return self._allows_prereleases def is_optional(self): return self._optional def is_vcs(self): return False def is_file(self): return False def accepts(self, package): # type: (poetry.packages.Package) -> bool """ Determines if the given package 
matches this dependency. """ return ( self._name == package.name and self._constraint.matches(Constraint('=', package.version)) and (not package.is_prerelease() or self.allows_prereleases()) ) def to_pep_508(self, with_extras=True): # type: (bool) -> str requirement = self.pretty_name if isinstance(self.constraint, MultiConstraint): requirement += ' ({})'.format(','.join( [str(c).replace(' ', '') for c in self.constraint.constraints] )) else: requirement += ' ({})'.format(str(self.constraint).replace(' ', '')) # Markers markers = [] # Python marker if self.python_versions != '*': python_constraint = self.python_constraint markers.append( self._create_nested_marker('python_version', python_constraint) ) in_extras = ' || '.join(self._in_extras) if in_extras and with_extras: markers.append( self._create_nested_marker( 'extra', GenericConstraint.parse(in_extras) ) ) if markers: if len(markers) > 1: markers = ['({})'.format(m) for m in markers] requirement += '; {}'.format(' and '.join(markers)) else: requirement += '; {}'.format(markers[0]) return requirement def _create_nested_marker(self, name, constraint): if isinstance(constraint, MultiConstraint): parts = [] for c in constraint.constraints: multi = False if isinstance(c, MultiConstraint): multi = True parts.append((multi, self._create_nested_marker(name, c))) glue = ' and ' if constraint.is_disjunctive(): parts = [ '({})'.format(part[1]) if part[0] else part[1] for part in parts ] glue = ' or ' else: parts = [part[1] for part in parts] marker = glue.join(parts) else: marker = '{} {} "{}"'.format( name, constraint.string_operator, constraint.version ) return marker def activate(self): """ Set the dependency as mandatory. """ self._optional = False def deactivate(self): """ Set the dependency as optional. 
""" self._optional = True def __eq__(self, other): if not isinstance(other, Dependency): return NotImplemented return self._name == other.name and self._constraint == other.constraint def __hash__(self): return hash((self._name, self._pretty_constraint)) def __str__(self): return '{} ({})'.format( self._pretty_name, self._pretty_constraint ) def __repr__(self): return '<{} {}>'.format(self.__class__.__name__, str(self)) PK!"poetry/packages/file_dependency.pyimport hashlib import io import pkginfo from pkginfo.distribution import HEADER_ATTRS from pkginfo.distribution import HEADER_ATTRS_2_0 from poetry.utils._compat import Path from .dependency import Dependency # Patching pkginfo to support Metadata version 2.1 (PEP 566) HEADER_ATTRS.update( { '2.1': HEADER_ATTRS_2_0 + ( ('Provides-Extra', 'provides_extra', True), ) } ) class FileDependency(Dependency): def __init__(self, path, # type: Path category='main', # type: str optional=False, # type: bool base=None # type: Path ): self._path = path self._base = base self._full_path = path if self._base and not self._path.is_absolute(): self._full_path = self._base / self._path if not self._full_path.exists(): raise ValueError('File {} does not exist'.format(self._path)) if self._full_path.is_dir(): raise ValueError( '{} is a directory, expected a file'.format(self._path) ) if self._path.suffix == '.whl': self._meta = pkginfo.Wheel(str(self._full_path)) else: # Assume sdist self._meta = pkginfo.SDist(str(self._full_path)) super(FileDependency, self).__init__( self._meta.name, self._meta.version, category=category, optional=optional, allows_prereleases=True ) @property def path(self): return self._path @property def full_path(self): return self._full_path.resolve() @property def metadata(self): return self._meta def is_file(self): return True def hash(self): h = hashlib.sha256() with self._path.open('rb') as fp: for content in iter(lambda: fp.read(io.DEFAULT_BUFFER_SIZE), b''): h.update(content) return h.hexdigest() PK!!zyypoetry/packages/locker.pyimport json import poetry.packages import poetry.repositories from hashlib import sha256 from typing import List from poetry.utils._compat import Path from poetry.utils.toml_file import TomlFile class Locker: _relevant_keys = [ 'name', 'version', 'dependencies', 'dev-dependencies', 'source', ] def __init__(self, lock, local_config): # type: (Path, dict) -> None self._lock = TomlFile(lock) self._local_config = local_config self._lock_data = None self._content_hash = self._get_content_hash() @property def lock(self): # type: () -> TomlFile return self._lock @property def lock_data(self): if self._lock_data is None: self._lock_data = self._get_lock_data() return self._lock_data def is_locked(self): # type: () -> bool """ Checks whether the locker has been locked (lockfile found). """ if not self._lock.exists(): return False return 'package' in self.lock_data def is_fresh(self): # type: () -> bool """ Checks whether the lock file is still up to date with the current hash. """ lock = self._lock.read(True) metadata = lock.get('metadata', {}) if 'content-hash' in metadata: return self._content_hash == lock['metadata']['content-hash'] return False def locked_repository(self, with_dev_reqs=False ): # type: (bool) -> poetry.repositories.Repository """ Searches and returns a repository of locked packages. 
""" if not self.is_locked(): return poetry.repositories.Repository() lock_data = self.lock_data packages = poetry.repositories.Repository() if with_dev_reqs: locked_packages = lock_data['package'] else: locked_packages = [ p for p in lock_data['package'] if p['category'] == 'main' ] if not locked_packages: return packages for info in locked_packages: package = poetry.packages.Package( info['name'], info['version'], info['version'] ) package.description = info.get('description', '') package.category = info['category'] package.optional = info['optional'] package.hashes = lock_data['metadata']['hashes'][info['name']] package.python_versions = info['python-versions'] for dep_name, constraint in info.get('dependencies', {}).items(): package.add_dependency(dep_name, constraint) if 'requirements' in info: package.requirements = info['requirements'] if 'source' in info: package.source_type = info['source']['type'] package.source_url = info['source']['url'] package.source_reference = info['source']['reference'] packages.add_package(package) return packages def set_lock_data(self, root, packages): # type: () -> bool hashes = {} packages = self._lock_packages(packages) # Retrieving hashes for package in packages: hashes[package['name']] = package['hashes'] del package['hashes'] lock = { 'package': packages, 'metadata': { 'python-versions': root.python_versions, 'platform': root.platform, 'content-hash': self._content_hash, 'hashes': hashes, } } if root.extras: lock['extras'] = { extra: [dep.pretty_name for dep in deps] for extra, deps in root.extras.items() } if not self.is_locked() or lock != self.lock_data: self._write_lock_data(lock) return True return False def _write_lock_data(self, data): self._lock.write(data) self._lock_data = None def _get_content_hash(self): # type: () -> str """ Returns the sha256 hash of the sorted content of the composer file. """ content = self._local_config relevant_content = {} for key in self._relevant_keys: relevant_content[key] = content.get(key) content_hash = sha256( json.dumps(relevant_content, sort_keys=True).encode() ).hexdigest() return content_hash def _get_lock_data(self): # type: () -> dict if not self._lock.exists(): raise RuntimeError( 'No lockfile found. 
Unable to read locked packages' ) return self._lock.read(True) def _lock_packages(self, packages ): # type: (List['poetry.packages.Package']) -> list locked = [] for package in sorted(packages, key=lambda x: x.name): spec = self._dump_package(package) locked.append(spec) return locked def _dump_package(self, package ): # type: (poetry.packages.Package) -> dict dependencies = {} for dependency in package.requires: if dependency.is_optional(): continue dependencies[dependency.pretty_name] = str(dependency.pretty_constraint) data = { 'name': package.pretty_name, 'version': package.pretty_version, 'description': package.description, 'category': package.category, 'optional': package.optional, 'python-versions': package.python_versions, 'platform': package.platform, 'hashes': package.hashes, 'dependencies': dependencies } if package.source_type: data['source'] = { 'type': package.source_type, 'url': package.source_url, 'reference': package.source_reference } if package.requirements: data['requirements'] = package.requirements return data PK!d_#_#poetry/packages/package.py# -*- coding: utf-8 -*- import copy import re from typing import Union from poetry.semver.constraints import Constraint from poetry.semver.constraints import EmptyConstraint from poetry.semver.helpers import parse_stability from poetry.semver.version_parser import VersionParser from poetry.spdx import license_by_id from poetry.spdx import License from poetry.utils._compat import Path from poetry.version import parse as parse_version from .constraints.generic_constraint import GenericConstraint from .dependency import Dependency from .file_dependency import FileDependency from .vcs_dependency import VCSDependency AUTHOR_REGEX = re.compile('(?u)^(?P[- .,\w\d\'’"()]+) <(?P.+?)>$') class Package(object): AVAILABLE_PYTHONS = { '2', '2.7', '3', '3.4', '3.5', '3.6', '3.7' } supported_link_types = { 'require': { 'description': 'requires', 'method': 'requires' }, 'provide': { 'description': 'provides', 'method': 'provides' } } STABILITY_STABLE = 0 STABILITY_RC = 5 STABILITY_BETA = 10 STABILITY_ALPHA = 15 STABILITY_DEV = 20 stabilities = { 'stable': STABILITY_STABLE, 'rc': STABILITY_RC, 'beta': STABILITY_BETA, 'alpha': STABILITY_ALPHA, 'dev': STABILITY_DEV, } def __init__(self, name, version, pretty_version=None): """ Creates a new in memory package. 
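        Example (values are illustrative only):

            package = Package('requests', '2.18.0')
            package.python_versions = '~2.7 || ^3.4'
            package.add_dependency('chardet', '^3.0')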
""" self._pretty_name = name self._name = name.lower() self._version = str(parse_version(version)) self._pretty_version = pretty_version or version self.description = '' self._stability = parse_stability(version) self._dev = self._stability == 'dev' self._authors = [] self.homepage = None self.repository_url = None self.keywords = [] self._license = None self.readme = None self.source_type = '' self.source_reference = '' self.source_url = '' self.requires = [] self.dev_requires = [] self.extras = {} self._parser = VersionParser() self.category = 'main' self.hashes = [] self.optional = False # Requirements for making it mandatory self.requirements = {} self.build = None self.include = [] self.exclude = [] self.classifiers = [] self._python_versions = '*' self._python_constraint = self._parser.parse_constraints('*') self._platform = '*' self._platform_constraint = EmptyConstraint() self.cwd = None @property def name(self): return self._name @property def pretty_name(self): return self._pretty_name @property def version(self): return self._version @property def pretty_version(self): return self._pretty_version @property def unique_name(self): return self.name + '-' + self._version @property def pretty_string(self): return self.pretty_name + ' ' + self.pretty_version @property def full_pretty_version(self): if not self._dev and self.source_type not in ['hg', 'git']: return self._pretty_version # if source reference is a sha1 hash -- truncate if len(self.source_reference) == 40: return '{} {}'.format(self._pretty_version, self.source_reference[0:7]) return '{} {}'.format(self._pretty_version, self.source_reference) @property def authors(self): # type: () -> list return self._authors @property def author_name(self): # type: () -> str return self._get_author()['name'] @property def author_email(self): # type: () -> str return self._get_author()['email'] def _get_author(self): # type: () -> dict if not self._authors: return { 'name': None, 'email': None } m = AUTHOR_REGEX.match(self._authors[0]) name = m.group('name') email = m.group('email') return { 'name': name, 'email': email } @property def python_versions(self): return self._python_versions @python_versions.setter def python_versions(self, value): self._python_versions = value self._python_constraint = self._parser.parse_constraints(value) @property def python_constraint(self): return self._python_constraint @property def platform(self): # type: () -> str return self._platform @platform.setter def platform(self, value): # type: (str) -> None self._platform = value self._platform_constraint = GenericConstraint.parse(value) @property def platform_constraint(self): return self._platform_constraint @property def license(self): return self._license @license.setter def license(self, value): if value is None: self._license = value elif isinstance(value, License): self._license = value else: self._license = license_by_id(value) @property def all_classifiers(self): classifiers = copy.copy(self.classifiers) # Automatically set python classifiers parser = VersionParser() if self.python_versions == '*': python_constraint = parser.parse_constraints('~2.7 || ^3.4') else: python_constraint = self.python_constraint for version in sorted(self.AVAILABLE_PYTHONS): if len(version) == 1: constraint = parser.parse_constraints(version + '.*') else: constraint = Constraint('=', version) if python_constraint.matches(constraint): classifiers.append( 'Programming Language :: Python :: {}'.format(version) ) # Automatically set license classifiers if self.license: 
classifiers.append(self.license.classifier) classifiers = set(classifiers) return sorted(classifiers) def is_dev(self): return self._dev def is_prerelease(self): return self._stability != 'stable' def add_dependency(self, name, # type: str constraint=None, # type: Union[str, dict, None] category='main' # type: str ): # type: (...) -> Dependency if constraint is None: constraint = '*' if isinstance(constraint, dict): optional = constraint.get('optional', False) python_versions = constraint.get('python') platform = constraint.get('platform') allows_prereleases = constraint.get('allows-prereleases', False) if 'git' in constraint: # VCS dependency dependency = VCSDependency( name, 'git', constraint['git'], branch=constraint.get('branch', None), tag=constraint.get('tag', None), rev=constraint.get('rev', None), optional=optional, ) if python_versions: dependency.python_versions = python_versions if platform: dependency.platform = platform elif 'file' in constraint: file_path = Path(constraint['file']) dependency = FileDependency(file_path, base=self.cwd) else: version = constraint['version'] dependency = Dependency( name, version, optional=optional, category=category, allows_prereleases=allows_prereleases ) if python_versions: dependency.python_versions = python_versions if platform: dependency.platform = platform if 'extras' in constraint: for extra in constraint['extras']: dependency.extras.append(extra) else: dependency = Dependency(name, constraint, category=category) if category == 'dev': self.dev_requires.append(dependency) else: self.requires.append(dependency) return dependency def __hash__(self): return hash((self._name, self._version)) def __eq__(self, other): if not isinstance(other, Package): return NotImplemented return self._name == other.name and self._version == other.version def __str__(self): return self.unique_name def __repr__(self): return ''.format(self.unique_name) PK!"poetry/packages/project_package.pyPK!!poetry/packages/utils/__init__.pyPK!{?poetry/packages/utils/link.pyimport posixpath try: import urllib.parse as urlparse except ImportError: import urlparse import re from .utils import path_to_url from .utils import splitext class Link: def __init__(self, url, comes_from=None, requires_python=None): """ Object representing a parsed link from https://pypi.python.org/simple/* url: url of the resource pointed to (href of the link) comes_from: instance of HTMLPage where the link was found, or string. requires_python: String containing the `Requires-Python` metadata field, specified in PEP 345. This may be specified by a data-requires-python attribute in the HTML link tag, as described in PEP 503. 
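        Example of the parsed properties (URL invented for illustration):

            link = Link('https://files.example.org/foo-1.0.tar.gz#sha256=abc123')
            link.filename   # 'foo-1.0.tar.gz'
            link.ext        # '.tar.gz'
            link.hash_name  # 'sha256'
            link.hash       # 'abc123'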
""" # url can be a UNC windows share if url.startswith('\\\\'): url = path_to_url(url) self.url = url self.comes_from = comes_from self.requires_python = requires_python if requires_python else None def __str__(self): if self.requires_python: rp = ' (requires-python:%s)' % self.requires_python else: rp = '' if self.comes_from: return '%s (from %s)%s' % (self.url, self.comes_from, rp) else: return str(self.url) def __repr__(self): return '' % self def __eq__(self, other): if not isinstance(other, Link): return NotImplemented return self.url == other.url def __ne__(self, other): if not isinstance(other, Link): return NotImplemented return self.url != other.url def __lt__(self, other): if not isinstance(other, Link): return NotImplemented return self.url < other.url def __le__(self, other): if not isinstance(other, Link): return NotImplemented return self.url <= other.url def __gt__(self, other): if not isinstance(other, Link): return NotImplemented return self.url > other.url def __ge__(self, other): if not isinstance(other, Link): return NotImplemented return self.url >= other.url def __hash__(self): return hash(self.url) @property def filename(self): _, netloc, path, _, _ = urlparse.urlsplit(self.url) name = posixpath.basename(path.rstrip('/')) or netloc name = urlparse.unquote(name) assert name, ('URL %r produced no filename' % self.url) return name @property def scheme(self): return urlparse.urlsplit(self.url)[0] @property def netloc(self): return urlparse.urlsplit(self.url)[1] @property def path(self): return urlparse.unquote(urlparse.urlsplit(self.url)[2]) def splitext(self): return splitext(posixpath.basename(self.path.rstrip('/'))) @property def ext(self): return self.splitext()[1] @property def url_without_fragment(self): scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url) return urlparse.urlunsplit((scheme, netloc, path, query, None)) _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)') @property def egg_fragment(self): match = self._egg_fragment_re.search(self.url) if not match: return None return match.group(1) _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)') @property def subdirectory_fragment(self): match = self._subdirectory_fragment_re.search(self.url) if not match: return None return match.group(1) _hash_re = re.compile( r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)' ) @property def hash(self): match = self._hash_re.search(self.url) if match: return match.group(2) return None @property def hash_name(self): match = self._hash_re.search(self.url) if match: return match.group(1) return None @property def show_url(self): return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0]) @property def is_wheel(self): return self.ext == '.whl' @property def is_artifact(self): """ Determines if this points to an actual artifact (e.g. a tarball) or if it points to an "abstract" thing like a path or a VCS location. 
""" if self.scheme in ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']: return False return True PK!3[ [ poetry/packages/utils/utils.pyimport os import posixpath import re try: import urllib.parse as urlparse except ImportError: import urlparse try: import urllib.request as urllib2 except ImportError: import urllib2 BZ2_EXTENSIONS = ('.tar.bz2', '.tbz') XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma') ZIP_EXTENSIONS = ('.zip', '.whl') TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar') ARCHIVE_EXTENSIONS = ( ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS) SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS try: import bz2 # noqa SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS except ImportError: pass try: # Only for Python 3.3+ import lzma # noqa SUPPORTED_EXTENSIONS += XZ_EXTENSIONS except ImportError: pass def path_to_url(path): """ Convert a path to a file: URL. The path will be made absolute and have quoted path parts. """ path = os.path.normpath(os.path.abspath(path)) url = urlparse.urljoin('file:', urllib2.pathname2url(path)) return url def is_url(name): if ':' not in name: return False scheme = name.split(':', 1)[0].lower() return scheme in [ 'http', 'https', 'file', 'ftp', 'ssh', 'git', 'hg', 'bzr', 'sftp', 'svn' 'ssh' ] def strip_extras(path): m = re.match(r'^(.+)(\[[^\]]+\])$', path) extras = None if m: path_no_extras = m.group(1) extras = m.group(2) else: path_no_extras = path return path_no_extras, extras def is_installable_dir(path): """Return True if `path` is a directory containing a setup.py file.""" if not os.path.isdir(path): return False setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): return True return False def is_archive_file(name): """Return True if `name` is a considered as an archive file.""" ext = splitext(name)[1].lower() if ext in ARCHIVE_EXTENSIONS: return True return False def splitext(path): """Like os.path.splitext, but take off .tar too""" base, ext = posixpath.splitext(path) if base.lower().endswith('.tar'): ext = base[-4:] + ext base = base[:-4] return base, ext def group_markers(markers): groups = [[]] for marker in markers: assert isinstance(marker, (list, tuple, str)) if isinstance(marker, list): groups[-1].append(group_markers(marker)) elif isinstance(marker, tuple): lhs, op, rhs = marker groups[-1].append((lhs.value, op, rhs.value)) else: assert marker in ["and", "or"] if marker == "or": groups.append([]) return groups def convert_markers(markers): groups = group_markers(markers) requirements = {} def _group(_groups, or_=False): for group in _groups: if isinstance(group, tuple): variable, op, value = group group_name = str(variable) if group_name not in requirements: requirements[group_name] = [[]] elif or_: requirements[group_name].append([]) or_ = False requirements[group_name][-1].append((str(op), str(value))) else: _group(group, or_=True) _group(groups) return requirements PK!I[!poetry/packages/vcs_dependency.pyfrom .dependency import Dependency class VCSDependency(Dependency): """ Represents a VCS dependency """ def __init__(self, name, vcs, source, branch=None, tag=None, rev=None, optional=False): self._vcs = vcs self._source = source if not any([branch, tag, rev]): # If nothing has been specified, we assume master branch = 'master' self._branch = branch self._tag = tag self._rev = rev super(VCSDependency, self).__init__( name, '*', optional=optional, allows_prereleases=True ) @property def vcs(self): return self._vcs @property def source(self): return self._source @property def branch(self): return 
self._branch @property def tag(self): return self._tag @property def rev(self): return self._rev @property def reference(self): # type: () -> str return self._branch or self._tag or self._rev @property def pretty_constraint(self): # type: () -> str if self._branch: what = 'branch' version = self._branch elif self._tag: what = 'tag' version = self._tag else: what = 'rev' version = self._rev return '{} {}'.format(what, version) def is_vcs(self): # type: () -> bool return True def accepts_prereleases(self): return True PK!kpoetry/poetry.pyfrom __future__ import absolute_import from __future__ import unicode_literals import json import jsonschema from .__version__ import __version__ from .config import Config from .exceptions import InvalidProjectFile from .packages import Dependency from .packages import Locker from .packages import Package from .repositories import Pool from .repositories.pypi_repository import PyPiRepository from .spdx import license_by_id from .utils._compat import Path from .utils.toml_file import TomlFile class Poetry: VERSION = __version__ def __init__(self, file, # type: Path local_config, # type: dict package, # type: Package locker # type: Locker ): self._file = TomlFile(file) self._package = package self._local_config = local_config self._locker = locker self._config = Config.create('config.toml') # Configure sources self._pool = Pool() for source in self._local_config.get('source', []): self._pool.configure(source) # Always put PyPI last to prefere private repositories self._pool.add_repository( PyPiRepository( fallback=self._config.setting('settings.pypi.fallback', True) ) ) @property def file(self): return self._file @property def package(self): # type: () -> Package return self._package @property def local_config(self): # type: () -> dict return self._local_config @property def locker(self): # type: () -> Locker return self._locker @property def pool(self): # type: () -> Pool return self._pool @classmethod def create(cls, cwd): # type: () -> Poetry poetry_file = Path(cwd) / 'pyproject.toml' if not poetry_file.exists(): raise RuntimeError( 'Poetry could not find a pyproject.toml file in {}'.format(cwd) ) local_config = TomlFile(poetry_file.as_posix()).read(True) if 'tool' not in local_config or 'poetry' not in local_config['tool']: raise RuntimeError( '[tool.poetry] section not found in {}'.format(poetry_file.name) ) local_config = local_config['tool']['poetry'] # Checking validity cls.check(local_config) # Load package name = local_config['name'] version = local_config['version'] package = Package(name, version, version) package.cwd = Path(cwd) for author in local_config['authors']: package.authors.append(author) package.description = local_config.get('description', '') package.homepage = local_config.get('homepage') package.repository_url = local_config.get('repository') package.license = local_config.get('license') package.keywords = local_config.get('keywords', []) package.classifiers = local_config.get('classifiers', []) if 'readme' in local_config: package.readme = Path(cwd) / local_config['readme'] if 'platform' in local_config: package.platform = local_config['platform'] if 'dependencies' in local_config: for name, constraint in local_config['dependencies'].items(): if name.lower() == 'python': package.python_versions = constraint continue package.add_dependency(name, constraint) if 'dev-dependencies' in local_config: for name, constraint in local_config['dev-dependencies'].items(): package.add_dependency(name, constraint, category='dev') extras = 
local_config.get('extras', {}) for extra_name, requirements in extras.items(): package.extras[extra_name] = [] # Checking for dependency for req in requirements: req = Dependency(req, '*') for dep in package.requires: if dep.name == req.name: dep.in_extras.append(extra_name) package.extras[extra_name].append(dep) break if 'build' in local_config: package.build = local_config['build'] if 'include' in local_config: package.include = local_config['include'] if 'exclude' in local_config: package.exclude = local_config['exclude'] locker = Locker(poetry_file.with_suffix('.lock'), local_config) return cls(poetry_file, local_config, package, locker) @classmethod def check(cls, config, strict=False): # type: (dict, bool) -> bool """ Checks the validity of a configuration """ schema = ( Path(__file__).parent / 'json' / 'schemas' / 'poetry-schema.json' ) with schema.open() as f: schema = json.loads(f.read()) try: jsonschema.validate( config, schema ) except jsonschema.ValidationError as e: message = e.message if e.path: message = "[{}] {}".format( '.'.join(e.path), message ) raise InvalidProjectFile(message) if strict: # If strict, check the file more thoroughly # Checking license license = config.get('license') if license: try: license_by_id(license) except ValueError: raise InvalidProjectFile('Invalid license') return True PK!-;}poetry/puzzle/__init__.pyfrom .solver import Solver PK!` poetry/puzzle/exceptions.pyclass SolverProblemError(Exception): def __init__(self, error): self._error = error super(SolverProblemError, self).__init__(str(error)) @property def error(self): return self._error PK!,MYY$poetry/puzzle/operations/__init__.pyfrom .install import Install from .uninstall import Uninstall from .update import Update PK!Md#poetry/puzzle/operations/install.pyfrom .operation import Operation class Install(Operation): def __init__(self, package, reason=None): super(Install, self).__init__(reason) self._package = package @property def package(self): return self._package @property def job_type(self): return 'install' def __str__(self): return 'Installing {} ({})'.format( self.package.pretty_name, self.format_version(self.package) ) def __repr__(self): return ''.format( self.package.pretty_name, self.format_version(self.package) ) PK!be^^%poetry/puzzle/operations/operation.py# -*- coding: utf-8 -*- from typing import Union class Operation(object): def __init__(self, reason=None): # type: (Union[str, None]) -> None self._reason = reason self._skipped = False self._skip_reason = None @property def job_type(self): # type: () -> str raise NotImplementedError @property def reason(self): # type: () -> str return self._reason @property def skipped(self): # type: () -> bool return self._skipped @property def skip_reason(self): # type: () -> Union[str, None] return self._skip_reason def format_version(self, package): # type: (...) -> str return package.full_pretty_version def skip(self, reason): # type: (str) -> Operation self._skipped = True self._skip_reason = reason return self PK!ct%poetry/puzzle/operations/uninstall.pyfrom .operation import Operation class Uninstall(Operation): def __init__(self, package, reason=None): super(Uninstall, self).__init__(reason) self._package = package @property def package(self): return self._package @property def job_type(self): return 'uninstall' def __str__(self): return 'Uninstalling {} ({})'.format( self.package.pretty_name, self.format_version(self._package) ) def __repr__(self): return ''.format( self.package.pretty_name, self.format_version(self.package) ) PK! 
룹"poetry/puzzle/operations/update.pyfrom .operation import Operation class Update(Operation): def __init__(self, initial, target, reason=None): self._initial_package = initial self._target_package = target super(Update, self).__init__(reason) @property def initial_package(self): return self._initial_package @property def target_package(self): return self._target_package @property def package(self): return self._target_package @property def job_type(self): return 'update' def __str__(self): return ( 'Updating {} ({}) to {} ({})'.format( self.initial_package.pretty_name, self.format_version(self.initial_package), self.target_package.pretty_name, self.format_version(self.target_package) ) ) def __repr__(self): return ( ''.format( self.initial_package.pretty_name, self.format_version(self.initial_package), self.target_package.pretty_name, self.format_version(self.target_package) ) ) PK!Ͻ##poetry/puzzle/provider.pyimport os import shutil from functools import cmp_to_key from tempfile import mkdtemp from typing import Dict from typing import List from poetry.mixology import DependencyGraph from poetry.mixology.conflict import Conflict from poetry.mixology.contracts import SpecificationProvider from poetry.packages import Dependency from poetry.packages import FileDependency from poetry.packages import Package from poetry.packages import VCSDependency from poetry.packages import dependency_from_pep_508 from poetry.repositories import Pool from poetry.semver import less_than from poetry.utils._compat import Path from poetry.utils.toml_file import TomlFile from poetry.utils.venv import Venv from poetry.vcs.git import Git class Provider(SpecificationProvider): UNSAFE_PACKAGES = {'setuptools', 'distribute', 'pip'} def __init__(self, package, # type: Package pool, # type: Pool io ): self._package = package self._pool = pool self._io = io self._python_constraint = package.python_constraint self._base_dg = DependencyGraph() self._search_for = {} @property def pool(self): # type: () -> Pool return self._pool @property def name_for_explicit_dependency_source(self): # type: () -> str return 'pyproject.toml' @property def name_for_locking_dependency_source(self): # type: () -> str return 'pyproject.lock' def name_for(self, dependency): # type: (Dependency) -> str """ Returns the name for the given dependency. """ return dependency.name def search_for(self, dependency): # type: (Dependency) -> List[Package] """ Search for the specifications that match the given dependency. The specifications in the returned list will be considered in reverse order, so the latest version ought to be last. """ if dependency in self._search_for: return self._search_for[dependency] if dependency.is_vcs(): packages = self.search_for_vcs(dependency) elif dependency.is_file(): packages = self.search_for_file(dependency) else: packages = self._pool.find_packages( dependency.name, dependency.constraint, extras=dependency.extras, ) packages.sort( key=cmp_to_key( lambda x, y: 0 if x.version == y.version else -1 * int(less_than(x.version, y.version) or -1) ) ) self._search_for[dependency] = packages return self._search_for[dependency] def search_for_vcs(self, dependency): # type: (VCSDependency) -> List[Package] """ Search for the specifications that match the given VCS dependency. Basically, we clone the repository in a temporary directory and get the information we need by checking out the specified reference. 
""" if dependency.vcs != 'git': raise ValueError( 'Unsupported VCS dependency {}'.format(dependency.vcs) ) tmp_dir = Path( mkdtemp(prefix='pypoetry-git-{}'.format(dependency.name)) ) try: git = Git() git.clone(dependency.source, tmp_dir) git.checkout(dependency.reference, tmp_dir) revision = git.rev_parse( dependency.reference, tmp_dir ).strip() if dependency.tag or dependency.rev: revision = dependency.reference pyproject = TomlFile(tmp_dir / 'pyproject.toml') pyproject_content = None has_poetry = False if pyproject.exists(): pyproject_content = pyproject.read(True) has_poetry = ( 'tool' in pyproject_content and 'poetry' in pyproject_content['tool'] ) if pyproject_content and has_poetry: # If a pyproject.toml file exists # We use it to get the information we need info = pyproject_content['tool']['poetry'] name = info['name'] version = info['version'] package = Package(name, version, version) for req_name, req_constraint in info['dependencies'].items(): if req_name == 'python': package.python_versions = req_constraint continue package.add_dependency(req_name, req_constraint) else: # We need to use setup.py here # to figure the information we need # We need to place ourselves in the proper # folder for it to work current_dir = os.getcwd() os.chdir(tmp_dir.as_posix()) try: venv = Venv.create(self._io) output = venv.run( 'python', 'setup.py', '--name', '--version' ) output = output.split('\n') name = output[-3] version = output[-2] package = Package(name, version, version) # Figure out a way to get requirements except Exception: raise finally: os.chdir(current_dir) package.source_type = 'git' package.source_url = dependency.source package.source_reference = revision except Exception: raise finally: shutil.rmtree(tmp_dir.as_posix()) return [package] def search_for_file(self, dependency ): # type: (FileDependency) -> List[Package] package = Package(dependency.name, dependency.pretty_constraint) package.source_type = 'file' package.source_reference = str(dependency.path) package.description = dependency.metadata.summary for req in dependency.metadata.requires_dist: package.requires.append(dependency_from_pep_508(req)) if dependency.metadata.requires_python: package.python_versions = dependency.metadata.requires_python if dependency.metadata.platforms: package.platform = ' || '.join(dependency.metadata.platforms) package.hashes = [dependency.hash()] return [package] def dependencies_for(self, package): # type: (Package) -> List[Dependency] if package.source_type in ['git', 'file']: # Information should already be set pass else: complete_package = self._pool.package(package.name, package.version) # Update package with new information package.requires = complete_package.requires package.description = complete_package.description package.python_versions = complete_package.python_versions package.platform = complete_package.platform package.hashes = complete_package.hashes return [ r for r in package.requires if not r.is_optional() and r.name not in self.UNSAFE_PACKAGES ] def is_requirement_satisfied_by(self, requirement, # type: Dependency activated, # type: DependencyGraph package # type: Package ): # type: (...) -> bool """ Determines whether the given requirement is satisfied by the given spec, in the context of the current activated dependency graph. 
""" if isinstance(requirement, Package): return requirement == package if not requirement.accepts(package): return False if package.is_prerelease() and not requirement.allows_prereleases(): vertex = activated.vertex_named(package.name) if not any([r.allows_prereleases() for r in vertex.requirements]): return False return ( self._package.python_constraint.matches(package.python_constraint) and self._package.platform_constraint.matches(package.platform_constraint) ) def sort_dependencies(self, dependencies, # type: List[Dependency] activated, # type: DependencyGraph conflicts # type: Dict[str, List[Conflict]] ): # type: (...) -> List[Dependency] return sorted(dependencies, key=lambda d: [ 0 if activated.vertex_named(d.name).payload else 1, 0 if activated.vertex_named(d.name).root else 1, 0 if d.allows_prereleases() else 1, 0 if d.name in conflicts else 1 ]) PK!qzZZpoetry/puzzle/solver.pyfrom typing import List from poetry.mixology import Resolver from poetry.mixology.dependency_graph import DependencyGraph from poetry.mixology.exceptions import ResolverError from poetry.semver.version_parser import VersionParser from .exceptions import SolverProblemError from .operations import Install from .operations import Uninstall from .operations import Update from .operations.operation import Operation from .provider import Provider from .ui import UI class Solver: def __init__(self, package, pool, installed, locked, io): self._package = package self._pool = pool self._installed = installed self._locked = locked self._io = io def solve(self, requested, fixed=None): # type: (...) -> List[Operation] resolver = Resolver( Provider(self._package, self._pool, self._io), UI(self._io) ) base = None if fixed is not None: base = DependencyGraph() for fixed_req in fixed: base.add_vertex(fixed_req.name, fixed_req, True) try: graph = resolver.resolve(requested, base=base) except ResolverError as e: raise SolverProblemError(e) packages = [v.payload for v in graph.vertices.values()] # Setting info for vertex in graph.vertices.values(): tags = self._get_tags_for_vertex(vertex, requested) if 'main' in tags['category']: vertex.payload.category = 'main' else: vertex.payload.category = 'dev' if not tags['optional']: vertex.payload.optional = False else: vertex.payload.optional = True # Finding the less restrictive requirements requirements = {} parser = VersionParser() for req_name, reqs in tags['requirements'].items(): for req in reqs: if req_name == 'python': if 'python' not in requirements: requirements['python'] = req continue previous = parser.parse_constraints(requirements['python']) current = parser.parse_constraints(req) if current.matches(previous): requirements['python'] = req if req_name == 'platform': if 'platform' not in requirements: requirements['platform'] = req continue vertex.payload.requirements = requirements operations = [] for package in packages: installed = False for pkg in self._installed.packages: if package.name == pkg.name: installed = True # Checking version if package.version != pkg.version: operations.append(Update(pkg, package)) else: operations.append( Install(package).skip('Already installed') ) break if not installed: operations.append(Install(package)) # Checking for removals for pkg in self._locked.packages: remove = True for package in packages: if pkg.name == package.name: remove = False break if remove: skip = True for installed in self._installed.packages: if installed.name == pkg.name: skip = False break op = Uninstall(pkg) if skip: op.skip('Not currently installed') 
operations.append(op) requested_names = [r.name for r in requested] return sorted( operations, key=lambda o: ( 1 if not o.package.name not in requested_names else 0, o.package.name ) ) def _get_tags_for_vertex(self, vertex, requested): tags = { 'category': [], 'optional': True, 'requirements': { 'python': [], 'platform': [] } } if not vertex.incoming_edges: # Original dependency for req in requested: if req.name == vertex.name: tags['category'].append(req.category) if not req.is_optional(): tags['optional'] = False if req.python_versions != '*': tags['requirements']['python'].append(str(req.python_constraint)) if req.platform != '*': tags['requirements']['platform'].append(str(req.platform_constraint)) break else: for edge in vertex.incoming_edges: for req in edge.origin.payload.requires: if req.name == vertex.payload.name: if req.python_versions != '*': tags['requirements']['python'].append(req.python_versions) if req.platform != '*': tags['requirements']['platform'].append(req.platform) sub_tags = self._get_tags_for_vertex(edge.origin, requested) tags['category'] += sub_tags['category'] tags['optional'] = tags['optional'] and sub_tags['optional'] requirements = sub_tags['requirements'] tags['requirements']['python'] += requirements.get('python', []) tags['requirements']['platform'] += requirements.get('platform', []) return tags PK!poetry/puzzle/ui.pyfrom cleo.styles import CleoStyle from poetry.mixology.contracts import UI as BaseUI class UI(BaseUI): def __init__(self, io): # type: (CleoStyle) -> None self._io = io self._progress = None super(UI, self).__init__(self._io.is_debug()) @property def output(self): return self._io def before_resolution(self): self._io.write('Resolving dependencies') if self.is_debugging(): self._io.new_line() def indicate_progress(self): if not self.is_debugging(): self._io.write('.') def after_resolution(self): self._io.new_line() def debug(self, message, depth): if self.is_debugging(): debug_info = str(message) debug_info = '\n'.join([ ':{}: {}'.format(str(depth).rjust(4), s) for s in debug_info.split('\n') ]) + '\n' self.output.write(debug_info) PK!y::poetry/repositories/__init__.pyfrom .pool import Pool from .repository import Repository PK!U=&poetry/repositories/base_repository.pyclass BaseRepository(object): SEARCH_FULLTEXT = 0 SEARCH_NAME = 1 def __init__(self): self._packages = [] @property def packages(self): return self._packages def has_package(self, package): raise NotImplementedError() def package(self, name, version): raise NotImplementedError() def find_packages(self, name, constraint=None, extras=None): raise NotImplementedError() def search(self, query, mode=SEARCH_FULLTEXT): raise NotImplementedError() PK!ҫ0oo+poetry/repositories/installed_repository.pyfrom poetry.packages import Package from poetry.utils.venv import Venv from .repository import Repository class InstalledRepository(Repository): @classmethod def load(cls, venv): # type: (Venv) -> InstalledRepository """ Load installed packages. For now, it uses the pip "freeze" command. 
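        Each freeze line of the form name==version becomes a Package; for
        instance the line requests==2.18.0 yields
        Package('requests', '2.18.0', '2.18.0'). Editable and VCS lines
        (no '==') are ignored.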
""" repo = cls() freeze_output = venv.run('pip', 'freeze') for line in freeze_output.split('\n'): if '==' in line: name, version = line.split('==') repo.add_package(Package(name, version, version)) return repo PK!)(poetry/repositories/legacy_repository.pyfrom pip._vendor.pkg_resources import RequirementParseError try: from pip._internal.exceptions import InstallationError from pip._internal.req import InstallRequirement except ImportError: from pip.exceptions import InstallationError from pip.req import InstallRequirement from piptools.cache import DependencyCache from piptools.repositories import PyPIRepository from piptools.resolver import Resolver from piptools.scripts.compile import get_pip_command from cachy import CacheManager import poetry.packages from poetry.locations import CACHE_DIR from poetry.packages import Package from poetry.packages import dependency_from_pep_508 from poetry.semver.constraints import Constraint from poetry.semver.constraints.base_constraint import BaseConstraint from poetry.semver.version_parser import VersionParser from poetry.utils._compat import Path from poetry.version.markers import InvalidMarker from .pypi_repository import PyPiRepository class LegacyRepository(PyPiRepository): def __init__(self, name, url): if name == 'pypi': raise ValueError('The name [pypi] is reserved for repositories') self._packages = [] self._name = name self._url = url command = get_pip_command() opts, _ = command.parse_args([]) self._session = command._build_session(opts) self._repository = PyPIRepository(opts, self._session) self._cache_dir = Path(CACHE_DIR) / 'cache' / 'repositories' / name self._cache = CacheManager({ 'default': 'releases', 'serializer': 'json', 'stores': { 'releases': { 'driver': 'file', 'path': str(self._cache_dir) }, 'packages': { 'driver': 'dict' }, 'matches': { 'driver': 'dict' } } }) @property def name(self): return self._name def find_packages(self, name, constraint=None, extras=None): packages = [] if constraint is not None and not isinstance(constraint, BaseConstraint): version_parser = VersionParser() constraint = version_parser.parse_constraints(constraint) key = name if constraint: key = '{}:{}'.format(key, str(constraint)) if self._cache.store('matches').has(key): versions = self._cache.store('matches').get(key) else: candidates = [str(c.version) for c in self._repository.find_all_candidates(name)] versions = [] for version in candidates: if version in versions: continue if ( not constraint or (constraint and constraint.matches(Constraint('=', version))) ): versions.append(version) self._cache.store('matches').put(key, versions, 5) for version in versions: packages.append(Package(name, version, extras=extras)) return packages def package(self, name, version, extras=None ): # type: (...) -> poetry.packages.Package """ Retrieve the release information. This is a heavy task which takes time. We have to download a package to get the dependencies. We also need to download every file matching this release to get the various hashes. Note that, this will be cached so the subsequent operations should be much faster. 
""" try: index = self._packages.index( poetry.packages.Package(name, version, version) ) return self._packages[index] except ValueError: if extras is None: extras = [] release_info = self.get_release_info(name, version) package = poetry.packages.Package(name, version, version) for req in release_info['requires_dist']: try: dependency = dependency_from_pep_508(req) except InvalidMarker: # Invalid marker # We strip the markers hoping for the best req = req.split(';')[0] dependency = dependency_from_pep_508(req) if dependency.extras: for extra in dependency.extras: if extra not in package.extras: package.extras[extra] = [] package.extras[extra].append(dependency) if not dependency.is_optional(): package.requires.append(dependency) # Adding description package.description = release_info.get('summary', '') # Adding hashes information package.hashes = release_info['digests'] # Activate extra dependencies for extra in extras: if extra in package.extras: for dep in package.extras[extra]: dep.activate() package.requires += package.extras[extra] self._packages.append(package) return package def get_release_info(self, name, version): # type: (str, str) -> dict """ Return the release information given a package name and a version. The information is returned from the cache if it exists or retrieved from the remote server. """ return self._cache.store('releases').remember_forever( '{}:{}'.format(name, version), lambda: self._get_release_info(name, version) ) def _get_release_info(self, name, version): # type: (str, str) -> dict ireq = InstallRequirement.from_line('{}=={}'.format(name, version)) resolver = Resolver( [ireq], self._repository, cache=DependencyCache(self._cache_dir.as_posix()) ) try: requirements = list(resolver._iter_dependencies(ireq)) except (InstallationError, RequirementParseError): # setup.py egg-info error most likely # So we assume no dependencies requirements = [] requires = [] for dep in requirements: constraint = str(dep.req.specifier) require = dep.name if constraint: require += ' ({})'.format(constraint) requires.append(require) try: hashes = resolver.resolve_hashes([ireq])[ireq] except IndexError: # Sometimes pip-tools fails when getting indices hashes = [] hashes = [h.split(':')[1] for h in hashes] data = { 'name': name, 'version': version, 'summary': '', 'requires_dist': requires, 'digests': hashes } resolver.repository.freshen_build_caches() return data PK! \1 1 poetry/repositories/pool.pyfrom typing import List from typing import Union import poetry.packages from .base_repository import BaseRepository from .repository import Repository class Pool(BaseRepository): def __init__(self, repositories=None): # type: (Union[list, None]) -> None if repositories is None: repositories = [] self._repositories = [] for repository in repositories: self.add_repository(repository) super(Pool, self).__init__() @property def repositories(self): # type: () -> List[Repository] return self._repositories def add_repository(self, repository): # type: (Repository) -> Pool """ Adds a repository to the pool. """ self._repositories.append(repository) return self def configure(self, source): # type: (dict) -> Pool """ Configures a repository based on a source specification and add it to the pool. 
""" from .legacy_repository import LegacyRepository if 'url' in source: # PyPI-like repository if 'name' not in source: raise RuntimeError('Missing [name] in source.') repository = LegacyRepository(source['name'], source['url']) else: raise RuntimeError('Unsupported source specified') return self.add_repository(repository) def has_package(self, package): raise NotImplementedError() def package(self, name, version): package = poetry.packages.Package(name, version, version) if package in self._packages: return self._packages[self._packages.index(package)] for repository in self._repositories: package = repository.package(name, version) if package: self._packages.append(package) return package return None def find_packages(self, name, constraint=None, extras=None): for repository in self._repositories: packages = repository.find_packages(name, constraint, extras=extras) if packages: return packages return [] def search(self, query, mode=BaseRepository.SEARCH_FULLTEXT): from .legacy_repository import LegacyRepository results = [] for repository in self._repositories: if isinstance(repository, LegacyRepository): continue results += repository.search(query, mode=mode) return results PK!z7z7&poetry/repositories/pypi_repository.pyimport os import tarfile import zipfile import pkginfo from bz2 import BZ2File from gzip import GzipFile from typing import List from typing import Union try: import urllib.parse as urlparse except ImportError: import urlparse try: from xmlrpc.client import ServerProxy except ImportError: from xmlrpclib import ServerProxy from cachecontrol import CacheControl from cachecontrol.caches.file_cache import FileCache from cachy import CacheManager from requests import get from requests import session from poetry.locations import CACHE_DIR from poetry.packages import dependency_from_pep_508 from poetry.packages import Package from poetry.semver.constraints import Constraint from poetry.semver.constraints.base_constraint import BaseConstraint from poetry.semver.version_parser import VersionParser from poetry.utils._compat import Path from poetry.utils.helpers import temporary_directory from poetry.version.markers import InvalidMarker from .repository import Repository class PyPiRepository(Repository): def __init__(self, url='https://pypi.org/', disable_cache=False, fallback=True): self._url = url self._disable_cache = disable_cache self._fallback = fallback release_cache_dir = Path(CACHE_DIR) / 'cache' / 'repositories' / 'pypi' self._cache = CacheManager({ 'default': 'releases', 'serializer': 'json', 'stores': { 'releases': { 'driver': 'file', 'path': str(release_cache_dir) }, 'packages': { 'driver': 'dict' } } }) self._session = CacheControl( session(), cache=FileCache(str(release_cache_dir / '_http')) ) super(PyPiRepository, self).__init__() def find_packages(self, name, # type: str constraint=None, # type: Union[Constraint, str, None] extras=None # type: Union[list, None] ): # type: (...) -> List[Package] """ Find packages on the remote server. 
""" packages = [] if constraint is not None and not isinstance(constraint, BaseConstraint): version_parser = VersionParser() constraint = version_parser.parse_constraints(constraint) info = self.get_package_info(name) versions = [] for version, release in info['releases'].items(): if not release: # Bad release continue if ( not constraint or (constraint and constraint.matches(Constraint('=', version))) ): versions.append(version) for version in versions: packages.append(Package(name, version)) return packages def package(self, name, # type: str version, # type: str extras=None # type: (Union[list, None]) ): # type: (...) -> Union[Package, None] try: index = self._packages.index(Package(name, version, version)) return self._packages[index] except ValueError: if extras is None: extras = [] release_info = self.get_release_info(name, version) if ( self._fallback and release_info['requires_dist'] is None and not release_info['requires_python'] and '_fallback' not in release_info ): # Force cache update self._cache.forget('{}:{}'.format(name, version)) release_info = self.get_release_info(name, version) package = Package(name, version, version) requires_dist = release_info['requires_dist'] or [] for req in requires_dist: try: dependency = dependency_from_pep_508(req) except InvalidMarker: # Invalid marker # We strip the markers hoping for the best req = req.split(';')[0] dependency = dependency_from_pep_508(req) except ValueError: # Likely unable to parse constraint so we skip it continue if dependency.extras: for extra in dependency.extras: if extra not in package.extras: package.extras[extra] = [] package.extras[extra].append(dependency) if not dependency.is_optional(): package.requires.append(dependency) # Adding description package.description = release_info.get('summary', '') if release_info['requires_python']: package.python_versions = release_info['requires_python'] if release_info['platform']: package.platform = release_info['platform'] # Adding hashes information package.hashes = release_info['digests'] # Activate extra dependencies for extra in extras: if extra in package.extras: for dep in package.extras[extra]: dep.activate() package.requires += package.extras[extra] self._packages.append(package) return package def search(self, query, mode=0): results = [] search = { 'name': query } if mode == self.SEARCH_FULLTEXT: search['summary'] = query client = ServerProxy('https://pypi.python.org/pypi') hits = client.search(search, 'or') for hit in hits: result = Package(hit['name'], hit['version'], hit['version']) result.description = hit['summary'] results.append(result) return results def get_package_info(self, name): # type: (str) -> dict """ Return the package information given its name. The information is returned from the cache if it exists or retrieved from the remote server. """ if self._disable_cache: return self._get_package_info(name) return self._cache.store('packages').remember_forever( name, lambda: self._get_package_info(name) ) def _get_package_info(self, name): # type: (str) -> dict data = self._get('pypi/{}/json'.format(name)) if data is None: raise ValueError('Package [{}] not found.'.format(name)) return data def get_release_info(self, name, version): # type: (str, str) -> dict """ Return the release information given a package name and a version. The information is returned from the cache if it exists or retrieved from the remote server. 
""" if self._disable_cache: return self._get_release_info(name, version) return self._cache.remember_forever( '{}:{}'.format(name, version), lambda: self._get_release_info(name, version) ) def _get_release_info(self, name, version): # type: (str, str) -> dict json_data = self._get('pypi/{}/{}/json'.format(name, version)) if json_data is None: raise ValueError('Package [{}] not found.'.format(name)) info = json_data['info'] data = { 'name': info['name'], 'version': info['version'], 'summary': info['summary'], 'platform': info['platform'], 'requires_dist': info['requires_dist'], 'requires_python': info['requires_python'], 'digests': [], '_fallback': False } try: version_info = json_data['releases'][version] except KeyError: version_info = [] for file_info in version_info: data['digests'].append(file_info['digests']['sha256']) if ( self._fallback and data['requires_dist'] is None and not data['requires_python'] ): # No dependencies set (along with other information) # This might be due to actually no dependencies # or badly set metadata when uploading # So, we need to make sure there is actually no # dependencies by introspecting packages data['_fallback'] = True urls = {} for url in json_data['urls']: # Only get sdist and universal wheels dist_type = url['packagetype'] if dist_type not in ['sdist', 'bdist_wheel']: continue if dist_type == 'sdist' and 'dist' not in urls: urls[url['packagetype']] = url['url'] continue if 'bdist_wheel' in urls: continue # If bdist_wheel, check if it's universal python_version = url['python_version'] if python_version not in ['py2.py3', 'py3', 'py2']: continue parts = urlparse.urlparse(url['url']) filename = os.path.basename(parts.path) if '-none-any' not in filename: continue if not urls: return data requires_dist = self._get_requires_dist_from_urls(urls) data['requires_dist'] = requires_dist return data def _get(self, endpoint): # type: (str) -> Union[dict, None] json_response = self._session.get(self._url + endpoint) if json_response.status_code == 404: return None json_data = json_response.json() return json_data def _get_requires_dist_from_urls(self, urls ): # type: (dict) -> Union[list, None] if 'bdist_wheel' in urls: return self._get_requires_dist_from_wheel(urls['bdist_wheek']) return self._get_requires_dist_from_sdist(urls['sdist']) def _get_requires_dist_from_wheel(self, url ): # type: (str) -> Union[list, None] filename = os.path.basename(urlparse.urlparse(url).path) with temporary_directory() as temp_dir: filepath = os.path.join(temp_dir, filename) self._download(url, filepath) meta = pkginfo.Wheel(filepath) if meta.requires_dist: return meta.requires_dist def _get_requires_dist_from_sdist(self, url ): # type: (str) -> Union[list, None] filename = os.path.basename(urlparse.urlparse(url).path) with temporary_directory() as temp_dir: filepath = Path(temp_dir) / filename self._download(url, str(filepath)) meta = pkginfo.SDist(str(filepath)) if meta.requires_dist: return meta.requires_dist # Still not dependencies found # So, we unpack and introspect suffix = filepath.suffix gz = None if suffix == '.zip': tar = zipfile.ZipFile(str(filepath)) else: if suffix == '.bz2': gz = BZ2File(str(filepath)) else: gz = GzipFile(str(filepath)) tar = tarfile.TarFile(str(filepath), fileobj=gz) try: tar.extractall(os.path.join(temp_dir, 'unpacked')) finally: if gz: gz.close() tar.close() unpacked = Path(temp_dir) / 'unpacked' sdist_dir = unpacked / Path(filename).name.rstrip('.tar.gz') # Checking for .egg-info eggs = list(sdist_dir.glob('*.egg-info')) if eggs: egg_info = 
eggs[0] requires = egg_info / 'requires.txt' if requires.exists(): with requires.open() as f: return self._parse_requires(f.read()) return # Still nothing, assume no dependencies # We could probably get them by executing # python setup.py egg-info but I don't feel # confortable executing a file just for the sake # of getting dependencies. return def _download(self, url, dest): # type: (str, str) -> None r = get(url, stream=True) with open(dest, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): if chunk: f.write(chunk) def _parse_requires(self, requires): # type: (str) -> Union[list, None] lines = requires.split('\n') requires_dist = [] in_section = False current_marker = None for line in lines: line = line.strip() if not line: if in_section: in_section = False continue if line.startswith('['): # extras or conditional dependencies marker = line.lstrip('[').rstrip(']') if ':' not in marker: extra, marker = marker, None else: extra, marker = marker.split(':') if extra: if marker: marker = '{} and extra == "{}"'.format(marker, extra) else: marker = 'extra == "{}"'.format(extra) if marker: current_marker = marker continue if current_marker: line = '{}; {}'.format(line, current_marker) requires_dist.append(line) if requires_dist: return requires_dist PK!}j, , !poetry/repositories/repository.pyfrom poetry.semver.constraints import Constraint from poetry.semver.constraints.base_constraint import BaseConstraint from poetry.semver.version_parser import VersionParser from poetry.version import parse as parse_version from .base_repository import BaseRepository class Repository(BaseRepository): def __init__(self, packages=None): super(Repository, self).__init__() if packages is None: packages = [] for package in packages: self.add_package(package) def package(self, name, version): name = name.lower() version = str(parse_version(version)) for package in self.packages: if name == package.name and package.version == version: return package def find_packages(self, name, constraint=None, extras=None): name = name.lower() packages = [] if extras is None: extras = [] if not isinstance(constraint, BaseConstraint): parser = VersionParser() constraint = parser.parse_constraints(constraint) for package in self.packages: if name == package.name: pkg_constraint = Constraint('==', package.version) if constraint is None or constraint.matches(pkg_constraint): for extra in extras: if extra in package.extras: for dep in package.extras[extra]: dep.activate() package.requires += package.extras[extra] packages.append(package) return packages def has_package(self, package): package_id = package.unique_name for repo_package in self.packages: if package_id == repo_package.unique_name: return True return False def add_package(self, package): self._packages.append(package) def remove_package(self, package): package_id = package.unique_name index = None for i, repo_package in enumerate(self.packages): if package_id == repo_package.unique_name: index = i break if index is not None: del self._packages[index] def __len__(self): return len(self._packages) PK!8ESSpoetry/semver/__init__.pyfrom functools import cmp_to_key from .comparison import less_than from .constraints import Constraint from .helpers import normalize_version from .version_parser import VersionParser SORT_ASC = 1 SORT_DESC = -1 _parser = VersionParser() def statisfies(version, constraints): """ Determine if given version satisfies given constraints. 
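    For example::

        >>> statisfies('1.2.3', '>=1.0,<2.0')
        True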
:type version: str :type constraints: str :rtype: bool """ provider = Constraint('==', normalize_version(version)) constraints = _parser.parse_constraints(constraints) return constraints.matches(provider) def satisfied_by(versions, constraints): """ Return all versions that satisfy given constraints. :type versions: List[str] :type constraints: str :rtype: List[str] """ return [version for version in versions if statisfies(version, constraints)] def sort(versions): return _sort(versions, SORT_ASC) def rsort(versions): return _sort(versions, SORT_DESC) def _sort(versions, direction): normalized = [ (i, normalize_version(version)) for i, version in enumerate(versions) ] normalized.sort( key=cmp_to_key( lambda x, y: 0 if x[1] == y[1] else -direction * int(less_than(x[1], y[1]) or -1) ) ) return [versions[i] for i, _ in normalized] PK!gnpoetry/semver/comparison.pyfrom .constraints.constraint import Constraint def greater_than(version1, version2): """ Evaluates the expression: version1 > version2. :type version1: str :type version2: str :rtype: bool """ return compare(version1, '>', version2) def greater_than_or_equal(version1, version2): """ Evaluates the expression: version1 >= version2. :type version1: str :type version2: str :rtype: bool """ return compare(version1, '>=', version2) def less_than(version1, version2): """ Evaluates the expression: version1 < version2. :type version1: str :type version2: str :rtype: bool """ return compare(version1, '<', version2) def less_than_or_equal(version1, version2): """ Evaluates the expression: version1 <= version2. :type version1: str :type version2: str :rtype: bool """ return compare(version1, '<=', version2) def equal(version1, version2): """ Evaluates the expression: version1 == version2. :type version1: str :type version2: str :rtype: bool """ return compare(version1, '==', version2) def not_equal(version1, version2): """ Evaluates the expression: version1 != version2. 
:type version1: str :type version2: str :rtype: bool """ return compare(version1, '!=', version2) def compare(version1, operator, version2): """ Evaluates the expression: version1 operator version2 :type version1: str :type operator: str :type version2: str :rtype: bool """ constraint = Constraint(operator, version2) return constraint.matches(Constraint('==', version1)) PK!䢬0%poetry/semver/constraints/__init__.pyfrom .constraint import Constraint from .empty_constraint import EmptyConstraint from .multi_constraint import MultiConstraint PK!!dd,poetry/semver/constraints/base_constraint.pyclass BaseConstraint(object): def matches(self, provider): raise NotImplementedError() PK!AJ'poetry/semver/constraints/constraint.pyimport operator from poetry.version import parse as parse_version from poetry.version import version_compare from ..helpers import normalize_version from .base_constraint import BaseConstraint class Constraint(BaseConstraint): OP_EQ = operator.eq OP_LT = operator.lt OP_LE = operator.le OP_GT = operator.gt OP_GE = operator.ge OP_NE = operator.ne _trans_op_str = { '=': OP_EQ, '==': OP_EQ, '<': OP_LT, '<=': OP_LE, '>': OP_GT, '>=': OP_GE, '!=': OP_NE } _trans_op_int = { OP_EQ: '==', OP_LT: '<', OP_LE: '<=', OP_GT: '>', OP_GE: '>=', OP_NE: '!=' } def __init__(self, operator, version): # type: (str, str) -> None if operator not in self.supported_operators: raise ValueError( 'Invalid operator "{}" given, ' 'expected one of: {}' .format( operator, ', '.join(self.supported_operators) ) ) self._operator = self._trans_op_str[operator] self._string_operator = operator self._version = str(parse_version(version)) @property def supported_operators(self): # type: () -> list return list(self._trans_op_str.keys()) @property def operator(self): return self._operator @property def string_operator(self): return self._string_operator @property def version(self): # type: () -> str return self._version def matches(self, provider): if ( isinstance(provider, self.__class__) and provider.__class__ is self.__class__ ): return self.match_specific(provider) # turn matching around to find a match return provider.matches(self) def version_compare(self, a, b, operator): # type: (str, str, str) -> bool if operator not in self._trans_op_str: raise ValueError( 'Invalid operator "{}" given, ' 'expected one of: {}' .format( operator, ', '.join(self.supported_operators) ) ) return version_compare(a, b, operator) def match_specific(self, provider): # type: (Constraint) -> bool no_equal_op = self._trans_op_int[self._operator].replace('=', '') provider_no_equal_op = self._trans_op_int[provider.operator].replace('=', '') is_equal_op = self.OP_EQ is self._operator is_non_equal_op = self.OP_NE is self._operator is_provider_equal_op = self.OP_EQ is provider.operator is_provider_non_equal_op = self.OP_NE is provider.operator # A '!=' constraint matches when the other operator # is not '==' or when the versions differ; # these kinds of comparisons always have a solution if is_non_equal_op or is_provider_non_equal_op: return (not is_equal_op and not is_provider_equal_op or self.version_compare(provider.version, self._version, '!=')) # An example for the condition is <= 2.0 & < 1.0 # These kinds of comparisons always have a solution if (self._operator is not self.OP_EQ and no_equal_op == provider_no_equal_op): return True if self.version_compare( provider.version, self.version, self._trans_op_int[self._operator] ): # special case, e.g.
require >= 1.0 and provide < 1.0 # 1.0 >= 1.0 but 1.0 is outside of the provided interval if ( provider.version == self.version and self._trans_op_int[provider.operator] == provider_no_equal_op and self._trans_op_int[self.operator] != no_equal_op ): return False return True return False def __str__(self): return '{} {}'.format( self._trans_op_int[self._operator], self._version ) def __repr__(self): return '<Constraint {}>'.format(str(self)) PK!"-poetry/semver/constraints/empty_constraint.pyfrom .base_constraint import BaseConstraint class EmptyConstraint(BaseConstraint): pretty_string = None def matches(self, _): return True def __str__(self): return '*' PK!!-poetry/semver/constraints/multi_constraint.pyfrom .base_constraint import BaseConstraint class MultiConstraint(BaseConstraint): def __init__(self, constraints, conjunctive=True): self._constraints = tuple(constraints) self._conjunctive = conjunctive @property def constraints(self): return self._constraints def is_conjunctive(self): return self._conjunctive def is_disjunctive(self): return not self._conjunctive def matches(self, provider): if self.is_disjunctive(): for constraint in self._constraints: if constraint.matches(provider): return True return False for constraint in self._constraints: if not constraint.matches(provider): return False return True def __str__(self): constraints = [] for constraint in self._constraints: constraints.append(str(constraint)) return '{}'.format( (', ' if self._conjunctive else ' || ').join(constraints) ) PK!qyр0poetry/semver/constraints/wildcard_constraint.pyimport re from .constraint import Constraint class WilcardConstraint(Constraint): def __init__(self, constraint): # type: (str) -> None m = re.match( '^(!= ?|==)?v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.[xX*])+$', constraint ) if not m: raise ValueError('Invalid value for wildcard constraint') if not m.group(1): operator = '==' else: operator = m.group(1).strip() super(WilcardConstraint, self).__init__( operator, '.'.join([g if g else '*' for g in m.groups()[1:]]) ) if m.group(4): position = 2 elif m.group(3): position = 1 else: position = 0 from ..version_parser import VersionParser parser = VersionParser() groups = m.groups()[1:] low_version = parser._manipulate_version_string( groups, position ) high_version = parser._manipulate_version_string( groups, position, 1 ) if operator == '!=': if low_version == '0.0.0.0': self._constraint = Constraint('>=', high_version) else: self._constraint = parser.parse_constraints( '<{} || >={}'.format(low_version, high_version) ) else: if low_version == '0.0.0.0': self._constraint = Constraint('<', high_version) else: self._constraint = parser.parse_constraints( '>={},<{}'.format(low_version, high_version) ) @property def supported_operators(self): return ['!=', '=='] @property def constraint(self): return self._constraint def matches(self, provider): # type: (Constraint) -> bool if isinstance(provider, self.__class__): return self._constraint.matches(provider.constraint) return provider.matches(self._constraint) def __str__(self): op = '' if self.string_operator == '!=': op = '!= ' return '{}{}'.format( op, self._version ) PK!4poetry/semver/helpers.pyimport re _modifier_regex = ( '[._-]?' '(?:(stable|beta|b|RC|c|pre|alpha|a|patch|pl|p|post|[a-z])' '((?:[.-]?\d+)*)?)?' '([.-]?dev)?' ) def normalize_version(version): """ Normalizes a version string to be able to perform comparisons on it.
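Examples (illustrative):

>>> normalize_version('v1.2')
'1.2.0.0'
>>> normalize_version('1.0.0-beta.1')
'1.0.0.0-beta.1'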
""" version = version.strip() # strip off build metadata m = re.match('^([^,\s+]+)\+[^\s]+$', version) if m: version = m.group(1) index = None # Match classic versioning m = re.match( '(?i)^v?(\d{{1,5}})(\.\d+)?(\.\d+)?(\.\d+)?{}$'.format( _modifier_regex ), version ) if m: version = '{}{}{}{}'.format( m.group(1), m.group(2) if m.group(2) else '.0', m.group(3) if m.group(3) else '.0', m.group(4) if m.group(4) else '.0', ) index = 5 else: # Some versions have the form M.m.p-\d+ # which means M.m.p-post\d+ m = re.match( '(?i)^v?(\d{1,5})(\.\d+)?(\.\d+)?(\.\d+)?-(\d+)$', version ) if m: version = '{}{}{}{}'.format( m.group(1), m.group(2) if m.group(2) else '.0', m.group(3) if m.group(3) else '.0', m.group(4) if m.group(4) else '.0', ) if m.group(5): version += '-post.' + m.group(5) m = re.match( '(?i)^v?(\d{{1,5}})(\.\d+)?(\.\d+)?(\.\d+)?{}$'.format( _modifier_regex ), version ) index = 5 else: # Match date(time) based versioning m = re.match( '(?i)^v?(\d{{4}}(?:[.:-]?\d{{2}}){{1,6}}(?:[.:-]?\d{{1,3}})?){}$'.format( _modifier_regex ), version ) if m: version = re.sub('\D', '.', m.group(1)) index = 2 # add version modifiers if a version was matched if index is not None: if len(m.groups()) - 1 >= index and m.group(index): if m.group(index) == 'post': # Post releases should be considered # stable releases if '-post' in version: return version version = '{}-post'.format(version) else: version = '{}-{}'.format( version, _expand_stability(m.group(index)) ) if m.group(index + 1): version = '{}.{}'.format( version, m.group(index + 1).lstrip('.-') ) return version raise ValueError('Invalid version string "{}"'.format(version)) def normalize_stability(stability): # type: (str) -> str stability = stability.lower() if stability == 'rc': return 'RC' return stability def parse_stability(version): # type: (str) -> str """ Returns the stability of a version. """ version = re.sub('(?i)#.+$', '', version) if 'dev-' == version[:4] or '-dev' == version[-4:]: return 'dev' m = re.search('(?i){}(?:\+.*)?$'.format(_modifier_regex), version.lower()) if m: if m.group(3): return 'dev' if m.group(1): if m.group(1) in ['beta', 'b']: return 'beta' elif m.group(1) in ['alpha', 'a']: return 'alpha' elif m.group(1) in ['rc', 'c']: return 'RC' elif m.group(1) == 'post': return 'stable' else: return 'dev' return 'stable' def _expand_stability(stability): # type: (str) -> str stability = stability.lower() if stability == 'a': return 'alpha' elif stability == 'b': return 'beta' elif stability in ['c', 'pre']: return 'rc' elif stability in ['p', 'pl']: return 'patch' elif stability in ['post']: return 'post' return stability PK!8i[%%poetry/semver/version_parser.pyimport re from typing import Tuple from typing import Union from .constraints.constraint import Constraint from .constraints.base_constraint import BaseConstraint from .constraints.empty_constraint import EmptyConstraint from .constraints.multi_constraint import MultiConstraint from .constraints.wildcard_constraint import WilcardConstraint from .helpers import normalize_version, _expand_stability, parse_stability class VersionParser: _modifier_regex = ( '[._-]?' '(?:(stable|beta|b|RC|alpha|a|patch|post|pl|p)((?:[.-]?\d+)*)?)?' '([.-]?dev)?' ) _stabilities = [ 'stable', 'RC', 'beta', 'alpha', 'dev' ] def parse_constraints( self, constraints ): # type: (str) -> Union[Constraint, MultiConstraint] """ Parses a constraint string into MultiConstraint and/or Constraint objects. 
""" pretty_constraint = constraints m = re.match( '(?i)([^,\s]*?)@({})$'.format('|'.join(self._stabilities)), constraints ) if m: constraints = m.group(1) if not constraints: constraints = '*' or_constraints = re.split('\s*\|\|?\s*', constraints.strip()) or_groups = [] for constraints in or_constraints: and_constraints = re.split( '(?< ,]) *(? 1: constraint_objects = [] for constraint in and_constraints: for parsed_constraint in self._parse_constraint(constraint): constraint_objects.append(parsed_constraint) else: constraint_objects = self._parse_constraint(and_constraints[0]) if len(constraint_objects) == 1: constraint = constraint_objects[0] else: constraint = MultiConstraint(constraint_objects) or_groups.append(constraint) if len(or_groups) == 1: constraint = or_groups[0] elif len(or_groups) == 2: # parse the two OR groups and if they are contiguous we collapse # them into one constraint a = str(or_groups[0]) b = str(or_groups[1]) pos_a = a.find('<', 4) pos_b = a.find('<', 4) if ( isinstance(or_groups[0], MultiConstraint) and isinstance(or_groups[1], MultiConstraint) and len(or_groups[0].constraints) and len(or_groups[1].constraints) and a[:3] == '>=' and pos_a != -1 and b[:3] == '>=' and pos_b != -1 and a[pos_a + 2:-1] == b[4:pos_b - 5] ): constraint = MultiConstraint( Constraint('>=', a[4:pos_a - 5]), Constraint('<', b[pos_b + 2:-1]) ) else: constraint = MultiConstraint(or_groups, False) else: constraint = MultiConstraint(or_groups, False) constraint.pretty_string = pretty_constraint return constraint def _parse_constraint( self, constraint ): # type: (str) -> Union[Tuple[BaseConstraint], Tuple[BaseConstraint, BaseConstraint]] m = re.match('(?i)^v?[xX*](\.[xX*])*$', constraint) if m: return EmptyConstraint(), # Some versions have the form M.m.p-\d+ # which means M.m.p-post\d+ m = re.match( '(?i)^(~=?|\^|<> ?|!= ?|>=? ?|<=? ?|==? ?)v?(\d{{1,5}})(\.\d+)?(\.\d+)?(\.\d+)?-(\d+){}$'.format( self._modifier_regex ), constraint ) if m: constraint = '{}{}{}{}{}'.format( m.group(1), m.group(2), m.group(3) if m.group(3) else '.0', m.group(4) if m.group(4) else '.0', m.group(5) if m.group(5) else '.0', ) if m.group(6): constraint += '-post.' + m.group(6) version_regex = ( 'v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.(\d+))?{}(?:\+[^\s]+)?' ).format(self._modifier_regex) # Tilde range # # Like wildcard constraints, unsuffixed tilde constraints # say that they must be greater than the previous version, # to ensure that unstable instances of the current version are allowed. # However, if a stability suffix is added to the constraint, # then a >= match on the current version is used instead. m = re.match('(?i)^~=?{}$'.format(version_regex), constraint) if m: # Work out which position in the version we are operating at if m.group(4): position = 3 elif m.group(3): position = 2 elif m.group(2): position = 2 else: position = 0 # Calculate the stability suffix stability_suffix = '' if m.group(5): stability_suffix += '-{}{}'.format( _expand_stability(m.group(5)), '.' 
+ m.group(6) if m.group(6) else '' ) low_version = self._manipulate_version_string( m.groups(), position, 0 ) + stability_suffix lower_bound = Constraint('>=', low_version) # For the upper bound, we increment one position of # higher significance (a negative position would be # illegal, so it is clamped at 0) high_position = max(0, position - 1) high_version = self._manipulate_version_string( m.groups(), high_position, 1 ) upper_bound = Constraint('<', high_version) return lower_bound, upper_bound # Caret range # # Allows changes that do not modify # the left-most non-zero digit in the [major, minor, patch] tuple. # In other words, this allows: # - patch and minor updates for versions 1.0.0 and above, # - patch updates for versions 0.X >=0.1.0, # - and no updates for versions 0.0.X m = re.match('^\^{}$'.format(version_regex), constraint) if m: if m.group(1) != '0' or not m.group(2): position = 0 elif m.group(2) != '0' or not m.group(3): position = 1 else: position = 2 # Calculate the stability suffix stability_suffix = '' if m.group(5): stability_suffix += '-{}{}'.format( _expand_stability(m.group(5)), '.' + m.group(6) if m.group(6) else '' ) low_version = normalize_version(constraint[1:]) lower_bound = Constraint('>=', low_version) # For the upper bound, we increment at the position # computed above (the left-most non-zero digit) high_version = self._manipulate_version_string( m.groups(), position, 1 ) upper_bound = Constraint('<', high_version) return lower_bound, upper_bound # X range # # Any of X, x, or * may be used to "stand in" # for one of the numeric values in the [major, minor, patch] tuple. # A partial version range is treated as an X-Range, # so the special character is in fact optional. m = re.match( '^(!= ?|==)?v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.[xX*])+$', constraint ) if m: # We just leave it as is return WilcardConstraint(constraint), # Basic Comparators m = re.match('^(<>|!=|>=?|<=?|==?)?\s*(.*)', constraint) if m: try: version = normalize_version(m.group(2)) stability = parse_stability(version) stability_re = re.match( '(?:[^-]*)(-{})$'.format(self._modifier_regex), m.group(2).lower() ) if stability == 'stable' and stability_re: version = version.split('-')[0] + stability_re.group(1) return Constraint(m.group(1) or '=', version), except ValueError: pass raise ValueError( 'Could not parse version constraint: {}'.format(constraint) ) def _manipulate_version_string(self, matches, position, increment=0, pad='0'): """ Increment, decrement, or simply pad a version number.
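Example (illustrative): incrementing at the minor position pads everything after it:

>>> VersionParser()._manipulate_version_string(('1', '2', '3', '4'), 1, increment=1)
'1.3.0.0'
"""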
""" matches = [matches[i] if i <= len(matches) - 1 and matches[i] is not None else pad for i in range(4)] for i in range(3, -1, -1): if i > position: matches[i] = pad elif i == position and increment: matches[i] = int(matches[i]) + increment # If matches[i] was 0, carry the decrement if matches[i] < 0: matches[i] = pad position -= 1 # Return null on a carry overflow if i == 1: return return '{}.{}.{}.{}'.format(matches[0], matches[1], matches[2], matches[3]) PK!Y&&poetry/spdx/__init__.pyimport json import os from .license import License from .updater import Updater _licenses = None def license_by_id(identifier): if _licenses is None: load_licenses() id = identifier.lower() if id not in _licenses: raise ValueError('Invalid license id: {}'.format(identifier)) return _licenses[id] def load_licenses(): global _licenses _licenses = {} licenses_file = os.path.join( os.path.dirname(__file__), 'data', 'licenses.json' ) with open(licenses_file) as f: data = json.loads(f.read()) for name, license in data.items(): _licenses[name.lower()] = License( name, license[0], license[1], license[2] ) if __name__ == '__main__': updater = Updater() updater.dump() PK!=|uupoetry/spdx/data/licenses.json{ "0BSD": [ "BSD Zero Clause License", false, false ], "AAL": [ "Attribution Assurance License", true, false ], "ADSL": [ "Amazon Digital Services License", false, false ], "AFL-1.1": [ "Academic Free License v1.1", true, false ], "AFL-1.2": [ "Academic Free License v1.2", true, false ], "AFL-2.0": [ "Academic Free License v2.0", true, false ], "AFL-2.1": [ "Academic Free License v2.1", true, false ], "AFL-3.0": [ "Academic Free License v3.0", true, false ], "AGPL-1.0": [ "Affero General Public License v1.0", false, false ], "AGPL-3.0": [ "GNU Affero General Public License v3.0", true, true ], "AGPL-3.0-only": [ "GNU Affero General Public License v3.0 only", true, false ], "AGPL-3.0-or-later": [ "GNU Affero General Public License v3.0 or later", true, false ], "AMDPLPA": [ "AMD's plpa_map.c License", false, false ], "AML": [ "Apple MIT License", false, false ], "AMPAS": [ "Academy of Motion Picture Arts and Sciences BSD", false, false ], "ANTLR-PD": [ "ANTLR Software Rights Notice", false, false ], "APAFML": [ "Adobe Postscript AFM License", false, false ], "APL-1.0": [ "Adaptive Public License 1.0", true, false ], "APSL-1.0": [ "Apple Public Source License 1.0", true, false ], "APSL-1.1": [ "Apple Public Source License 1.1", true, false ], "APSL-1.2": [ "Apple Public Source License 1.2", true, false ], "APSL-2.0": [ "Apple Public Source License 2.0", true, false ], "Abstyles": [ "Abstyles License", false, false ], "Adobe-2006": [ "Adobe Systems Incorporated Source Code License Agreement", false, false ], "Adobe-Glyph": [ "Adobe Glyph List License", false, false ], "Afmparse": [ "Afmparse License", false, false ], "Aladdin": [ "Aladdin Free Public License", false, false ], "Apache-1.0": [ "Apache License 1.0", false, false ], "Apache-1.1": [ "Apache License 1.1", true, false ], "Apache-2.0": [ "Apache License 2.0", true, false ], "Artistic-1.0": [ "Artistic License 1.0", true, false ], "Artistic-1.0-Perl": [ "Artistic License 1.0 (Perl)", true, false ], "Artistic-1.0-cl8": [ "Artistic License 1.0 w/clause 8", true, false ], "Artistic-2.0": [ "Artistic License 2.0", true, false ], "BSD-1-Clause": [ "BSD 1-Clause License", false, false ], "BSD-2-Clause": [ "BSD 2-Clause \"Simplified\" License", true, false ], "BSD-2-Clause-FreeBSD": [ "BSD 2-Clause FreeBSD License", false, false ], "BSD-2-Clause-NetBSD": [ "BSD 
2-Clause NetBSD License", false, false ], "BSD-2-Clause-Patent": [ "BSD-2-Clause Plus Patent License", true, false ], "BSD-3-Clause": [ "BSD 3-Clause \"New\" or \"Revised\" License", true, false ], "BSD-3-Clause-Attribution": [ "BSD with attribution", false, false ], "BSD-3-Clause-Clear": [ "BSD 3-Clause Clear License", false, false ], "BSD-3-Clause-LBNL": [ "Lawrence Berkeley National Labs BSD variant license", false, false ], "BSD-3-Clause-No-Nuclear-License": [ "BSD 3-Clause No Nuclear License", false, false ], "BSD-3-Clause-No-Nuclear-License-2014": [ "BSD 3-Clause No Nuclear License 2014", false, false ], "BSD-3-Clause-No-Nuclear-Warranty": [ "BSD 3-Clause No Nuclear Warranty", false, false ], "BSD-4-Clause": [ "BSD 4-Clause \"Original\" or \"Old\" License", false, false ], "BSD-4-Clause-UC": [ "BSD-4-Clause (University of California-Specific)", false, false ], "BSD-Protection": [ "BSD Protection License", false, false ], "BSD-Source-Code": [ "BSD Source Code Attribution", false, false ], "BSL-1.0": [ "Boost Software License 1.0", true, false ], "Bahyph": [ "Bahyph License", false, false ], "Barr": [ "Barr License", false, false ], "Beerware": [ "Beerware License", false, false ], "BitTorrent-1.0": [ "BitTorrent Open Source License v1.0", false, false ], "BitTorrent-1.1": [ "BitTorrent Open Source License v1.1", false, false ], "Borceux": [ "Borceux license", false, false ], "CATOSL-1.1": [ "Computer Associates Trusted Open Source License 1.1", true, false ], "CC-BY-1.0": [ "Creative Commons Attribution 1.0", false, false ], "CC-BY-2.0": [ "Creative Commons Attribution 2.0", false, false ], "CC-BY-2.5": [ "Creative Commons Attribution 2.5", false, false ], "CC-BY-3.0": [ "Creative Commons Attribution 3.0", false, false ], "CC-BY-4.0": [ "Creative Commons Attribution 4.0", false, false ], "CC-BY-NC-1.0": [ "Creative Commons Attribution Non Commercial 1.0", false, false ], "CC-BY-NC-2.0": [ "Creative Commons Attribution Non Commercial 2.0", false, false ], "CC-BY-NC-2.5": [ "Creative Commons Attribution Non Commercial 2.5", false, false ], "CC-BY-NC-3.0": [ "Creative Commons Attribution Non Commercial 3.0", false, false ], "CC-BY-NC-4.0": [ "Creative Commons Attribution Non Commercial 4.0", false, false ], "CC-BY-NC-ND-1.0": [ "Creative Commons Attribution Non Commercial No Derivatives 1.0", false, false ], "CC-BY-NC-ND-2.0": [ "Creative Commons Attribution Non Commercial No Derivatives 2.0", false, false ], "CC-BY-NC-ND-2.5": [ "Creative Commons Attribution Non Commercial No Derivatives 2.5", false, false ], "CC-BY-NC-ND-3.0": [ "Creative Commons Attribution Non Commercial No Derivatives 3.0", false, false ], "CC-BY-NC-ND-4.0": [ "Creative Commons Attribution Non Commercial No Derivatives 4.0", false, false ], "CC-BY-NC-SA-1.0": [ "Creative Commons Attribution Non Commercial Share Alike 1.0", false, false ], "CC-BY-NC-SA-2.0": [ "Creative Commons Attribution Non Commercial Share Alike 2.0", false, false ], "CC-BY-NC-SA-2.5": [ "Creative Commons Attribution Non Commercial Share Alike 2.5", false, false ], "CC-BY-NC-SA-3.0": [ "Creative Commons Attribution Non Commercial Share Alike 3.0", false, false ], "CC-BY-NC-SA-4.0": [ "Creative Commons Attribution Non Commercial Share Alike 4.0", false, false ], "CC-BY-ND-1.0": [ "Creative Commons Attribution No Derivatives 1.0", false, false ], "CC-BY-ND-2.0": [ "Creative Commons Attribution No Derivatives 2.0", false, false ], "CC-BY-ND-2.5": [ "Creative Commons Attribution No Derivatives 2.5", false, false ], "CC-BY-ND-3.0": [ "Creative Commons 
Attribution No Derivatives 3.0", false, false ], "CC-BY-ND-4.0": [ "Creative Commons Attribution No Derivatives 4.0", false, false ], "CC-BY-SA-1.0": [ "Creative Commons Attribution Share Alike 1.0", false, false ], "CC-BY-SA-2.0": [ "Creative Commons Attribution Share Alike 2.0", false, false ], "CC-BY-SA-2.5": [ "Creative Commons Attribution Share Alike 2.5", false, false ], "CC-BY-SA-3.0": [ "Creative Commons Attribution Share Alike 3.0", false, false ], "CC-BY-SA-4.0": [ "Creative Commons Attribution Share Alike 4.0", false, false ], "CC0-1.0": [ "Creative Commons Zero v1.0 Universal", false, false ], "CDDL-1.0": [ "Common Development and Distribution License 1.0", true, false ], "CDDL-1.1": [ "Common Development and Distribution License 1.1", false, false ], "CDLA-Permissive-1.0": [ "Community Data License Agreement Permissive 1.0", false, false ], "CDLA-Sharing-1.0": [ "Community Data License Agreement Sharing 1.0", false, false ], "CECILL-1.0": [ "CeCILL Free Software License Agreement v1.0", false, false ], "CECILL-1.1": [ "CeCILL Free Software License Agreement v1.1", false, false ], "CECILL-2.0": [ "CeCILL Free Software License Agreement v2.0", false, false ], "CECILL-2.1": [ "CeCILL Free Software License Agreement v2.1", true, false ], "CECILL-B": [ "CeCILL-B Free Software License Agreement", false, false ], "CECILL-C": [ "CeCILL-C Free Software License Agreement", false, false ], "CNRI-Jython": [ "CNRI Jython License", false, false ], "CNRI-Python": [ "CNRI Python License", true, false ], "CNRI-Python-GPL-Compatible": [ "CNRI Python Open Source GPL Compatible License Agreement", false, false ], "CPAL-1.0": [ "Common Public Attribution License 1.0", true, false ], "CPL-1.0": [ "Common Public License 1.0", true, false ], "CPOL-1.02": [ "Code Project Open License 1.02", false, false ], "CUA-OPL-1.0": [ "CUA Office Public License v1.0", true, false ], "Caldera": [ "Caldera License", false, false ], "ClArtistic": [ "Clarified Artistic License", false, false ], "Condor-1.1": [ "Condor Public License v1.1", false, false ], "Crossword": [ "Crossword License", false, false ], "CrystalStacker": [ "CrystalStacker License", false, false ], "Cube": [ "Cube License", false, false ], "D-FSL-1.0": [ "Deutsche Freie Software Lizenz", false, false ], "DOC": [ "DOC License", false, false ], "DSDP": [ "DSDP License", false, false ], "Dotseqn": [ "Dotseqn License", false, false ], "ECL-1.0": [ "Educational Community License v1.0", true, false ], "ECL-2.0": [ "Educational Community License v2.0", true, false ], "EFL-1.0": [ "Eiffel Forum License v1.0", true, false ], "EFL-2.0": [ "Eiffel Forum License v2.0", true, false ], "EPL-1.0": [ "Eclipse Public License 1.0", true, false ], "EPL-2.0": [ "Eclipse Public License 2.0", true, false ], "EUDatagrid": [ "EU DataGrid Software License", true, false ], "EUPL-1.0": [ "European Union Public License 1.0", false, false ], "EUPL-1.1": [ "European Union Public License 1.1", true, false ], "EUPL-1.2": [ "European Union Public License 1.2", true, false ], "Entessa": [ "Entessa Public License v1.0", true, false ], "ErlPL-1.1": [ "Erlang Public License v1.1", false, false ], "Eurosym": [ "Eurosym License", false, false ], "FSFAP": [ "FSF All Permissive License", false, false ], "FSFUL": [ "FSF Unlimited License", false, false ], "FSFULLR": [ "FSF Unlimited License (with License Retention)", false, false ], "FTL": [ "Freetype Project License", false, false ], "Fair": [ "Fair License", true, false ], "Frameworx-1.0": [ "Frameworx Open License 1.0", true, false ], 
"FreeImage": [ "FreeImage Public License v1.0", false, false ], "GFDL-1.1": [ "GNU Free Documentation License v1.1", false, true ], "GFDL-1.1-only": [ "GNU Free Documentation License v1.1 only", false, false ], "GFDL-1.1-or-later": [ "GNU Free Documentation License v1.1 or later", false, false ], "GFDL-1.2": [ "GNU Free Documentation License v1.2", false, true ], "GFDL-1.2-only": [ "GNU Free Documentation License v1.2 only", false, false ], "GFDL-1.2-or-later": [ "GNU Free Documentation License v1.2 or later", false, false ], "GFDL-1.3": [ "GNU Free Documentation License v1.3", false, true ], "GFDL-1.3-only": [ "GNU Free Documentation License v1.3 only", false, false ], "GFDL-1.3-or-later": [ "GNU Free Documentation License v1.3 or later", false, false ], "GL2PS": [ "GL2PS License", false, false ], "GPL-1.0": [ "GNU General Public License v1.0 only", false, true ], "GPL-1.0+": [ "GNU General Public License v1.0 or later", false, true ], "GPL-1.0-only": [ "GNU General Public License v1.0 only", false, false ], "GPL-1.0-or-later": [ "GNU General Public License v1.0 or later", false, false ], "GPL-2.0": [ "GNU General Public License v2.0 only", true, true ], "GPL-2.0+": [ "GNU General Public License v2.0 or later", true, true ], "GPL-2.0-only": [ "GNU General Public License v2.0 only", true, false ], "GPL-2.0-or-later": [ "GNU General Public License v2.0 or later", true, false ], "GPL-2.0-with-GCC-exception": [ "GNU General Public License v2.0 w/GCC Runtime Library exception", false, true ], "GPL-2.0-with-autoconf-exception": [ "GNU General Public License v2.0 w/Autoconf exception", false, true ], "GPL-2.0-with-bison-exception": [ "GNU General Public License v2.0 w/Bison exception", false, true ], "GPL-2.0-with-classpath-exception": [ "GNU General Public License v2.0 w/Classpath exception", false, true ], "GPL-2.0-with-font-exception": [ "GNU General Public License v2.0 w/Font exception", false, true ], "GPL-3.0": [ "GNU General Public License v3.0 only", true, true ], "GPL-3.0+": [ "GNU General Public License v3.0 or later", true, true ], "GPL-3.0-only": [ "GNU General Public License v3.0 only", true, false ], "GPL-3.0-or-later": [ "GNU General Public License v3.0 or later", true, false ], "GPL-3.0-with-GCC-exception": [ "GNU General Public License v3.0 w/GCC Runtime Library exception", true, true ], "GPL-3.0-with-autoconf-exception": [ "GNU General Public License v3.0 w/Autoconf exception", false, true ], "Giftware": [ "Giftware License", false, false ], "Glide": [ "3dfx Glide License", false, false ], "Glulxe": [ "Glulxe License", false, false ], "HPND": [ "Historical Permission Notice and Disclaimer", true, false ], "HaskellReport": [ "Haskell Language Report License", false, false ], "IBM-pibs": [ "IBM PowerPC Initialization and Boot Software", false, false ], "ICU": [ "ICU License", false, false ], "IJG": [ "Independent JPEG Group License", false, false ], "IPA": [ "IPA Font License", true, false ], "IPL-1.0": [ "IBM Public License v1.0", true, false ], "ISC": [ "ISC License", true, false ], "ImageMagick": [ "ImageMagick License", false, false ], "Imlib2": [ "Imlib2 License", false, false ], "Info-ZIP": [ "Info-ZIP License", false, false ], "Intel": [ "Intel Open Source License", true, false ], "Intel-ACPI": [ "Intel ACPI Software License Agreement", false, false ], "Interbase-1.0": [ "Interbase Public License v1.0", false, false ], "JSON": [ "JSON License", false, false ], "JasPer-2.0": [ "JasPer License", false, false ], "LAL-1.2": [ "Licence Art Libre 1.2", false, false ], "LAL-1.3": 
[ "Licence Art Libre 1.3", false, false ], "LGPL-2.0": [ "GNU Library General Public License v2 only", true, true ], "LGPL-2.0+": [ "GNU Library General Public License v2 or later", true, true ], "LGPL-2.0-only": [ "GNU Library General Public License v2 only", true, false ], "LGPL-2.0-or-later": [ "GNU Library General Public License v2 or later", true, false ], "LGPL-2.1": [ "GNU Lesser General Public License v2.1 only", true, true ], "LGPL-2.1+": [ "GNU Library General Public License v2 or later", true, true ], "LGPL-2.1-only": [ "GNU Lesser General Public License v2.1 only", true, false ], "LGPL-2.1-or-later": [ "GNU Lesser General Public License v2.1 or later", true, false ], "LGPL-3.0": [ "GNU Lesser General Public License v3.0 only", true, true ], "LGPL-3.0+": [ "GNU Lesser General Public License v3.0 or later", true, true ], "LGPL-3.0-only": [ "GNU Lesser General Public License v3.0 only", true, false ], "LGPL-3.0-or-later": [ "GNU Lesser General Public License v3.0 or later", true, false ], "LGPLLR": [ "Lesser General Public License For Linguistic Resources", false, false ], "LPL-1.0": [ "Lucent Public License Version 1.0", true, false ], "LPL-1.02": [ "Lucent Public License v1.02", true, false ], "LPPL-1.0": [ "LaTeX Project Public License v1.0", false, false ], "LPPL-1.1": [ "LaTeX Project Public License v1.1", false, false ], "LPPL-1.2": [ "LaTeX Project Public License v1.2", false, false ], "LPPL-1.3a": [ "LaTeX Project Public License v1.3a", false, false ], "LPPL-1.3c": [ "LaTeX Project Public License v1.3c", true, false ], "Latex2e": [ "Latex2e License", false, false ], "Leptonica": [ "Leptonica License", false, false ], "LiLiQ-P-1.1": [ "Licence Libre du Qu\u00e9bec \u2013 Permissive version 1.1", true, false ], "LiLiQ-R-1.1": [ "Licence Libre du Qu\u00e9bec \u2013 R\u00e9ciprocit\u00e9 version 1.1", true, false ], "LiLiQ-Rplus-1.1": [ "Licence Libre du Qu\u00e9bec \u2013 R\u00e9ciprocit\u00e9 forte version 1.1", true, false ], "Libpng": [ "libpng License", false, false ], "MIT": [ "MIT License", true, false ], "MIT-CMU": [ "CMU License", false, false ], "MIT-advertising": [ "Enlightenment License (e16)", false, false ], "MIT-enna": [ "enna License", false, false ], "MIT-feh": [ "feh License", false, false ], "MITNFA": [ "MIT +no-false-attribs license", false, false ], "MPL-1.0": [ "Mozilla Public License 1.0", true, false ], "MPL-1.1": [ "Mozilla Public License 1.1", true, false ], "MPL-2.0": [ "Mozilla Public License 2.0", true, false ], "MPL-2.0-no-copyleft-exception": [ "Mozilla Public License 2.0 (no copyleft exception)", true, false ], "MS-PL": [ "Microsoft Public License", true, false ], "MS-RL": [ "Microsoft Reciprocal License", true, false ], "MTLL": [ "Matrix Template Library License", false, false ], "MakeIndex": [ "MakeIndex License", false, false ], "MirOS": [ "MirOS License", true, false ], "Motosoto": [ "Motosoto License", true, false ], "Multics": [ "Multics License", true, false ], "Mup": [ "Mup License", false, false ], "NASA-1.3": [ "NASA Open Source Agreement 1.3", true, false ], "NBPL-1.0": [ "Net Boolean Public License v1", false, false ], "NCSA": [ "University of Illinois/NCSA Open Source License", true, false ], "NGPL": [ "Nethack General Public License", true, false ], "NLOD-1.0": [ "Norwegian Licence for Open Government Data", false, false ], "NLPL": [ "No Limit Public License", false, false ], "NOSL": [ "Netizen Open Source License", false, false ], "NPL-1.0": [ "Netscape Public License v1.0", false, false ], "NPL-1.1": [ "Netscape Public License 
v1.1", false, false ], "NPOSL-3.0": [ "Non-Profit Open Software License 3.0", true, false ], "NRL": [ "NRL License", false, false ], "NTP": [ "NTP License", true, false ], "Naumen": [ "Naumen Public License", true, false ], "Net-SNMP": [ "Net-SNMP License", false, false ], "NetCDF": [ "NetCDF license", false, false ], "Newsletr": [ "Newsletr License", false, false ], "Nokia": [ "Nokia Open Source License", true, false ], "Noweb": [ "Noweb License", false, false ], "Nunit": [ "Nunit License", false, true ], "OCCT-PL": [ "Open CASCADE Technology Public License", false, false ], "OCLC-2.0": [ "OCLC Research Public License 2.0", true, false ], "ODbL-1.0": [ "ODC Open Database License v1.0", false, false ], "OFL-1.0": [ "SIL Open Font License 1.0", false, false ], "OFL-1.1": [ "SIL Open Font License 1.1", true, false ], "OGTSL": [ "Open Group Test Suite License", true, false ], "OLDAP-1.1": [ "Open LDAP Public License v1.1", false, false ], "OLDAP-1.2": [ "Open LDAP Public License v1.2", false, false ], "OLDAP-1.3": [ "Open LDAP Public License v1.3", false, false ], "OLDAP-1.4": [ "Open LDAP Public License v1.4", false, false ], "OLDAP-2.0": [ "Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)", false, false ], "OLDAP-2.0.1": [ "Open LDAP Public License v2.0.1", false, false ], "OLDAP-2.1": [ "Open LDAP Public License v2.1", false, false ], "OLDAP-2.2": [ "Open LDAP Public License v2.2", false, false ], "OLDAP-2.2.1": [ "Open LDAP Public License v2.2.1", false, false ], "OLDAP-2.2.2": [ "Open LDAP Public License 2.2.2", false, false ], "OLDAP-2.3": [ "Open LDAP Public License v2.3", false, false ], "OLDAP-2.4": [ "Open LDAP Public License v2.4", false, false ], "OLDAP-2.5": [ "Open LDAP Public License v2.5", false, false ], "OLDAP-2.6": [ "Open LDAP Public License v2.6", false, false ], "OLDAP-2.7": [ "Open LDAP Public License v2.7", false, false ], "OLDAP-2.8": [ "Open LDAP Public License v2.8", false, false ], "OML": [ "Open Market License", false, false ], "OPL-1.0": [ "Open Public License v1.0", false, false ], "OSET-PL-2.1": [ "OSET Public License version 2.1", true, false ], "OSL-1.0": [ "Open Software License 1.0", true, false ], "OSL-1.1": [ "Open Software License 1.1", false, false ], "OSL-2.0": [ "Open Software License 2.0", true, false ], "OSL-2.1": [ "Open Software License 2.1", true, false ], "OSL-3.0": [ "Open Software License 3.0", true, false ], "OpenSSL": [ "OpenSSL License", false, false ], "PDDL-1.0": [ "ODC Public Domain Dedication & License 1.0", false, false ], "PHP-3.0": [ "PHP License v3.0", true, false ], "PHP-3.01": [ "PHP License v3.01", false, false ], "Plexus": [ "Plexus Classworlds License", false, false ], "PostgreSQL": [ "PostgreSQL License", true, false ], "Python-2.0": [ "Python License 2.0", true, false ], "QPL-1.0": [ "Q Public License 1.0", true, false ], "Qhull": [ "Qhull License", false, false ], "RHeCos-1.1": [ "Red Hat eCos Public License v1.1", false, false ], "RPL-1.1": [ "Reciprocal Public License 1.1", true, false ], "RPL-1.5": [ "Reciprocal Public License 1.5", true, false ], "RPSL-1.0": [ "RealNetworks Public Source License v1.0", true, false ], "RSA-MD": [ "RSA Message-Digest License ", false, false ], "RSCPL": [ "Ricoh Source Code Public License", true, false ], "Rdisc": [ "Rdisc License", false, false ], "Ruby": [ "Ruby License", false, false ], "SAX-PD": [ "Sax Public Domain Notice", false, false ], "SCEA": [ "SCEA Shared Source License", false, false ], "SGI-B-1.0": [ "SGI Free Software License B v1.0", false, false ], "SGI-B-1.1": [ 
"SGI Free Software License B v1.1", false, false ], "SGI-B-2.0": [ "SGI Free Software License B v2.0", false, false ], "SISSL": [ "Sun Industry Standards Source License v1.1", true, false ], "SISSL-1.2": [ "Sun Industry Standards Source License v1.2", false, false ], "SMLNJ": [ "Standard ML of New Jersey License", false, false ], "SMPPL": [ "Secure Messaging Protocol Public License", false, false ], "SNIA": [ "SNIA Public License 1.1", false, false ], "SPL-1.0": [ "Sun Public License v1.0", true, false ], "SWL": [ "Scheme Widget Library (SWL) Software License Agreement", false, false ], "Saxpath": [ "Saxpath License", false, false ], "Sendmail": [ "Sendmail License", false, false ], "SimPL-2.0": [ "Simple Public License 2.0", true, false ], "Sleepycat": [ "Sleepycat License", true, false ], "Spencer-86": [ "Spencer License 86", false, false ], "Spencer-94": [ "Spencer License 94", false, false ], "Spencer-99": [ "Spencer License 99", false, false ], "StandardML-NJ": [ "Standard ML of New Jersey License", false, true ], "SugarCRM-1.1.3": [ "SugarCRM Public License v1.1.3", false, false ], "TCL": [ "TCL/TK License", false, false ], "TCP-wrappers": [ "TCP Wrappers License", false, false ], "TMate": [ "TMate Open Source License", false, false ], "TORQUE-1.1": [ "TORQUE v2.5+ Software License v1.1", false, false ], "TOSL": [ "Trusster Open Source License", false, false ], "UPL-1.0": [ "Universal Permissive License v1.0", true, false ], "Unicode-DFS-2015": [ "Unicode License Agreement - Data Files and Software (2015)", false, false ], "Unicode-DFS-2016": [ "Unicode License Agreement - Data Files and Software (2016)", false, false ], "Unicode-TOU": [ "Unicode Terms of Use", false, false ], "Unlicense": [ "The Unlicense", false, false ], "VOSTROM": [ "VOSTROM Public License for Open Source", false, false ], "VSL-1.0": [ "Vovida Software License v1.0", true, false ], "Vim": [ "Vim License", false, false ], "W3C": [ "W3C Software Notice and License (2002-12-31)", true, false ], "W3C-19980720": [ "W3C Software Notice and License (1998-07-20)", false, false ], "W3C-20150513": [ "W3C Software Notice and Document License (2015-05-13)", false, false ], "WTFPL": [ "Do What The F*ck You Want To Public License", false, false ], "Watcom-1.0": [ "Sybase Open Watcom Public License 1.0", true, false ], "Wsuipa": [ "Wsuipa License", false, false ], "X11": [ "X11 License", false, false ], "XFree86-1.1": [ "XFree86 License 1.1", false, false ], "XSkat": [ "XSkat License", false, false ], "Xerox": [ "Xerox License", false, false ], "Xnet": [ "X.Net License", true, false ], "YPL-1.0": [ "Yahoo! Public License v1.0", false, false ], "YPL-1.1": [ "Yahoo! 
Public License v1.1", false, false ], "ZPL-1.1": [ "Zope Public License 1.1", false, false ], "ZPL-2.0": [ "Zope Public License 2.0", true, false ], "ZPL-2.1": [ "Zope Public License 2.1", false, false ], "Zed": [ "Zed License", false, false ], "Zend-2.0": [ "Zend License v2.0", false, false ], "Zimbra-1.3": [ "Zimbra Public License v1.3", false, false ], "Zimbra-1.4": [ "Zimbra Public License v1.4", false, false ], "Zlib": [ "zlib License", true, false ], "bzip2-1.0.5": [ "bzip2 and libbzip2 License v1.0.5", false, false ], "bzip2-1.0.6": [ "bzip2 and libbzip2 License v1.0.6", false, false ], "curl": [ "curl License", false, false ], "diffmark": [ "diffmark license", false, false ], "dvipdfm": [ "dvipdfm License", false, false ], "eCos-2.0": [ "eCos license version 2.0", false, true ], "eGenix": [ "eGenix.com Public License 1.1.0", false, false ], "gSOAP-1.3b": [ "gSOAP Public License v1.3b", false, false ], "gnuplot": [ "gnuplot License", false, false ], "iMatix": [ "iMatix Standard Function Library Agreement", false, false ], "libtiff": [ "libtiff License", false, false ], "mpich2": [ "mpich2 License", false, false ], "psfrag": [ "psfrag License", false, false ], "psutils": [ "psutils License", false, false ], "wxWindows": [ "wxWindows Library License", false, true ], "xinetd": [ "xinetd License", false, false ], "xpp": [ "XPP License", false, false ], "zlib-acknowledgement": [ "zlib/libpng License with Acknowledgement", false, false ] }PK!]kpoetry/spdx/license.pyfrom collections import namedtuple class License(namedtuple('License', 'id name is_osi_approved is_deprecated')): CLASSIFIER_SUPPORTED = { # Not OSI Approved 'Aladdin', 'CC0-1.0', 'CECILL-B', 'CECILL-C', 'NPL-1.0', 'NPL-1.1', # OSI Approved 'AFPL', 'AFL-1.1', 'AFL-1.2', 'AFL-2.0', 'AFL-2.1', 'AFL-3.0', 'Apache-1.1', 'Apache-2.0', 'APSL-1.1', 'APSL-1.2', 'APSL-2.0', 'Artistic-1.0', 'Artistic-2.0', 'AAL', 'AGPL-3.0', 'AGPL-3.0-only', 'AGPL-3.0-or-later', 'BSL-1.0', 'BSD-2-Clause', 'BSD-3-Clause', 'CDDL-1.0', 'CECILL-2.1', 'CPL-1.0', 'EFL-1.0', 'EFL-2.0', 'EPL-1.0', 'EPL-2.0', 'EUPL-1.1', 'EUPL-1.2', 'GPL-2.0', 'GPL-2.0+', 'GPL-2.0-only', 'GPL-2.0-or-later', 'GPL-3.0', 'GPL-3.0+', 'GPL-3.0-only', 'GPL-3.0-or-later', 'LGPL-2.0', 'LGPL-2.0+', 'LGPL-2.0-only', 'LGPL-2.0-or-later', 'LGPL-3.0', 'LGPL-3.0+', 'LGPL-3.0-only', 'LGPL-3.0-or-later', 'MIT', 'MPL-1.0', 'MPL-1.1', 'MPL-1.2', 'Nokia', 'W3C', 'ZPL-1.0', 'ZPL-2.0', 'ZPL-2.1', } CLASSIFIER_NAMES = { # Not OSI Approved 'AFPL': 'Aladdin Free Public License (AFPL)', 'CC0-1.0': 'CC0 1.0 Universal (CC0 1.0) Public Domain Dedication', 'CECILL-B': 'CeCILL-B Free Software License Agreement (CECILL-B)', 'CECILL-C': 'CeCILL-C Free Software License Agreement (CECILL-C)', 'NPL-1.0': 'Netscape Public License (NPL)', 'NPL-1.1': 'Netscape Public License (NPL)', # OSI Approved 'AFL-1.1': 'Academic Free License (AFL)', 'AFL-1.2': 'Academic Free License (AFL)', 'AFL-2.0': 'Academic Free License (AFL)', 'AFL-2.1': 'Academic Free License (AFL)', 'AFL-3.0': 'Academic Free License (AFL)', 'Apache-1.1': 'Apache Software License', 'Apache-2.0': 'Apache Software License', 'APSL-1.1': 'Apple Public Source License', 'APSL-1.2': 'Apple Public Source License', 'APSL-2.0': 'Apple Public Source License', 'Artistic-1.0': 'Artistic License', 'Artistic-2.0': 'Artistic License', 'AAL': 'Attribution Assurance License', 'AGPL-3.0': 'GNU Affero General Public License v3', 'AGPL-3.0-only': 'GNU Affero General Public License v3', 'AGPL-3.0-or-later': 'GNU Affero General Public License v3 or later (AGPLv3+)', 'BSL-1.0': 
'Boost Software License 1.0 (BSL-1.0)', 'BSD-2-Clause': 'BSD License', 'BSD-3-Clause': 'BSD License', 'CDDL-1.0': 'Common Development and Distribution License 1.0 (CDDL-1.0)', 'CECILL-2.1': 'CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)', 'CPL-1.0': 'Common Public License', 'EPL-1.0': 'Eclipse Public License 1.0 (EPL-1.0)', 'EFL-1.0': 'Eiffel Forum License', 'EFL-2.0': 'Eiffel Forum License', 'EUPL-1.1': 'European Union Public Licence 1.1 (EUPL 1.1)', 'EUPL-1.2': 'European Union Public Licence 1.2 (EUPL 1.2)', 'GPL-2.0': 'GNU General Public License v2 (GPLv2)', 'GPL-2.0-only': 'GNU General Public License v2 (GPLv2)', 'GPL-2.0+': 'GNU General Public License v2 or later (GPLv2+)', 'GPL-2.0-or-later': 'GNU General Public License v2 or later (GPLv2+)', 'GPL-3.0': 'GNU General Public License v3 (GPLv3)', 'GPL-3.0-only': 'GNU General Public License v3 (GPLv3)', 'GPL-3.0+': 'GNU General Public License v3 or later (GPLv3+)', 'GPL-3.0-or-later': 'GNU General Public License v3 or later (GPLv3+)', 'LGPL-2.0': 'GNU Lesser General Public License v2 (LGPLv2)', 'LGPL-2.0-only': 'GNU Lesser General Public License v2 (LGPLv2)', 'LGPL-2.0+': 'GNU Lesser General Public License v2 or later (LGPLv2+)', 'LGPL-2.0-or-later': 'GNU Lesser General Public License v2 or later (LGPLv2+)', 'LGPL-3.0': 'GNU Lesser General Public License v3 (LGPLv3)', 'LGPL-3.0-only': 'GNU Lesser General Public License v3 (LGPLv3)', 'LGPL-3.0+': 'GNU Lesser General Public License v3 or later (LGPLv3+)', 'LGPL-3.0-or-later': 'GNU Lesser General Public License v3 or later (LGPLv3+)', 'MPL-1.0': 'Mozilla Public License 1.0 (MPL)', 'MPL-1.1': 'Mozilla Public License 1.1 (MPL 1.1)', 'MPL-2.0': 'Mozilla Public License 2.0 (MPL 2.0)', 'W3C': 'W3C License', 'ZPL-1.1': 'Zope Public License', 'ZPL-2.0': 'Zope Public License', 'ZPL-2.1': 'Zope Public License', } @property def classifier(self): parts = ['License'] if self.is_osi_approved: parts.append('OSI Approved') name = self.classifier_name if name is not None: parts.append(name) return ' :: '.join(parts) @property def classifier_name(self): if self.id not in self.CLASSIFIER_SUPPORTED: if self.is_osi_approved: return None return 'Other/Proprietary License' if self.id in self.CLASSIFIER_NAMES: return self.CLASSIFIER_NAMES[self.id] return self.name PK!>ԓpoetry/spdx/updater.pyimport json import os try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen class Updater: BASE_URL = 'https://raw.githubusercontent.com/spdx/license-list-data/master/json/' def __init__(self, base_url=BASE_URL): self._base_url = base_url def dump(self, file=None): if file is None: file = os.path.join( os.path.dirname(__file__), 'data', 'licenses.json' ) licenses_url = self._base_url + 'licenses.json' with open(file, 'w') as f: f.write( json.dumps( self.get_licenses(licenses_url), indent=2, sort_keys=True ) ) def get_licenses(self, url): licenses = {} with urlopen(url) as r: data = json.loads(r.read().decode()) for info in data['licenses']: licenses[info['licenseId']] = [ info['name'], info['isOsiApproved'], info['isDeprecatedLicenseId'] ] return licenses PK!HWpoetry/toml/__init__.py""" This toml module is a port with changes and fixes of [contoml](https://github.com/jumpscale7/python-consistent-toml). """ from .toml_file import TOMLFile from .prettify.lexer import tokenize as lexer from .prettify.parser import parse_tokens def loads(text): """ Parses TOML text into a dict-like object and returns it. 
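Example (illustrative; item access assumes the dict-like TOMLFile interface):

>>> parsed = loads('name = "poetry"')
>>> parsed['name']
'poetry'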
""" tokens = tuple(lexer(text, is_top_level=True)) elements = parse_tokens(tokens) return TOMLFile(elements) def load(file_path): """ Parses a TOML file into a dict-like object and returns it. """ with open(file_path) as fd: return loads(fd.read()) def dumps(value): """ Dumps a data structure to TOML source code. The given value must be either a dict of dict values, a dict, or a TOML file constructed by this module. """ if not isinstance(value, TOMLFile): raise RuntimeError( 'Can only dump a TOMLFile instance loaded by load() or loads()' ) return value.dumps() def dump(obj, file_path, prettify=False): """ Dumps a data structure to the filesystem as TOML. The given value must be either a dict of dict values, a dict, or a TOML file constructed by this module. """ with open(file_path, 'w') as fp: fp.write(dumps(obj)) PK!)œpoetry/toml/array.pyfrom .prettify.errors import InvalidValueError from .freshtable import FreshTable from .prettify import util class ArrayOfTables(list): def __init__(self, toml_file, name, iterable=None): if iterable: list.__init__(self, iterable) self._name = name self._toml_file = toml_file def append(self, value): if isinstance(value, dict): table = FreshTable(parent=self, name=self._name, is_array=True) table._append_to_parent() index = len(self._toml_file[self._name]) - 1 for key_seq, value in util.flatten_nested(value).items(): # self._toml_file._setitem_with_key_seq((self._name, index) + key_seq, value) self._toml_file._array_setitem_with_key_seq(self._name, index, key_seq, value) # for k, v in value.items(): # table[k] = v else: raise InvalidValueError('Can only append a dict to an array of tables') def __getitem__(self, item): try: return list.__getitem__(self, item) except IndexError: if item == len(self): return FreshTable(parent=self, name=self._name, is_array=True) else: raise def append_fresh_table(self, fresh_table): list.append(self, fresh_table) if self._toml_file: self._toml_file.append_fresh_table(fresh_table) PK!poetry/toml/cascadedict.pyimport operator from functools import reduce from . import raw class CascadeDict: """ A dict-like object made up of one or more other dict-like objects where querying for an item cascade-gets it from all the internal dicts in order of their listing, and setting an item sets it on the first dict listed. """ def __init__(self, *internal_dicts): assert internal_dicts, 'internal_dicts cannot be empty' self._internal_dicts = tuple(internal_dicts) def cascaded_with(self, one_more_dict): """ Returns another instance with one more dict cascaded at the end. 
""" dicts = self._internal_dicts + one_more_dict return CascadeDict(*dicts) def __getitem__(self, item): for d in self._internal_dicts: try: return d[item] except KeyError: pass raise KeyError def __setitem__(self, key, value): for d in self._internal_dicts[1:]: if key in d: d[key] = value self._internal_dicts[0][key] = value def get(self, item, default=None): try: return self[item] except KeyError: return default def keys(self): return set(reduce(operator.or_, (set(d.keys()) for d in self._internal_dicts))) def items(self): all_items = reduce(operator.add, (list(d.items()) for d in reversed(self._internal_dicts))) unique_items = {k: v for k, v in all_items}.items() return tuple(unique_items) def __contains__(self, item): for d in self._internal_dicts: if item in d: return True return False def __len__(self): return len(self.keys()) @property def neutralized(self): return {k: raw.to_raw(v) for k, v in self.items()} @property def primitive_value(self): return self.neutralized def __repr__(self): return repr(self.primitive_value) PK!opoetry/toml/freshtable.pyfrom .prettify.elements.table import TableElement class FreshTable(TableElement): """ A fresh TableElement that appended itself to each of parents when it first gets written to at most once. parents is a sequence of objects providing an append_fresh_table(TableElement) method """ def __init__(self, parent, name, is_array=False): TableElement.__init__(self, sub_elements=[]) self._parent = parent self._name = name self._is_array = is_array # As long as this flag is false, setitem() operations will append the table header and this table # to the toml_file's elements self.__appended = False @property def name(self): return self._name @property def is_array(self): return self._is_array def _append_to_parent(self): """ Causes this ephemeral table to be persisted on the TOMLFile. """ if self.__appended: return if self._parent is not None: self._parent.append_fresh_table(self) self.__appended = True def __setitem__(self, key, value): TableElement.__setitem__(self, key, value) self._append_to_parent() PK!poetry/toml/peekableit.pyimport itertools class PeekableIterator: # Returned by peek() when the iterator is exhausted. Truthiness is False. Nothing = tuple() def __init__(self, iter): self._iter = iter def __next__(self): return next(self._iter) def next(self): return self.__next__() def __iter__(self): return self def peek(self): """ Returns PeekableIterator.Nothing when the iterator is exhausted. """ try: v = next(self._iter) self._iter = itertools.chain((v,), self._iter) return v except StopIteration: return PeekableIterator.Nothing PK!+gŸ poetry/toml/prettify/__init__.pyfrom ._version import VERSION __version__ = VERSION def prettify(toml_text): """ Prettifies and returns the TOML file content provided. """ from .parser import parse_tokens from .lexer import tokenize from .prettifier import prettify as element_prettify tokens = tokenize(toml_text, is_top_level=True) elements = parse_tokens(tokens) prettified = element_prettify(elements) return ''.join(pretty_element.serialized() for pretty_element in prettified) def prettify_from_file(file_path): """ Reads, prettifies and returns the TOML file specified by the file_path. """ with open(file_path, 'r') as fp: return prettify(fp.read()) PK!y poetry/toml/prettify/_version.pyVERSION = 'master' PK!css)poetry/toml/prettify/elements/__init__.py """ TOML file elements (a higher abstraction layer than individual lexical tokens). 
""" from .traversal import TraversalMixin from .errors import InvalidElementError from .table import TableElement from .tableheader import TableHeaderElement from .common import TYPE_METADATA, TYPE_ATOMIC, TYPE_CONTAINER, TYPE_MARKUP from . import traversal from . import factory PK!5 .poetry/toml/prettify/elements/abstracttable.pyfrom .common import ContainerElement from . import traversal class AbstractTable(ContainerElement, traversal.TraversalMixin, dict): """ Common code for handling tables as key-value pairs with metadata elements sprinkled all over. Assumes input sub_elements are correct. """ def __init__(self, sub_elements): ContainerElement.__init__(self, sub_elements) self._fallback = None def _enumerate_items(self): """ Returns ((key_index, key_element), (value_index, value_element)) for all the element key-value pairs. """ non_metadata = self._enumerate_non_metadata_sub_elements() while True: yield next(non_metadata), next(non_metadata) def items(self): for (key_i, key), (value_i, value) in self._enumerate_items(): yield key.value, value.value if self._fallback: for key, value in self._fallback.items(): yield key, value def keys(self): return tuple(key for (key, _) in self.items()) def values(self): return tuple(value for (_, value) in self.items()) def __len__(self): return len(tuple(self._enumerate_items())) def __contains__(self, item): return item in self.keys() def _find_key_and_value(self, key): """ Returns (key_i, value_i) corresponding to the given key value. Raises KeyError if no matching key found. """ for (key_i, key_element), (value_i, value_element) in self._enumerate_items(): if key_element.value == key: return key_i, value_i raise KeyError def __getitem__(self, item): for key, value in self.items(): if key == item: return value raise KeyError def get(self, key, default=None): try: return self[key] except KeyError: return default def set_fallback(self, fallback): """ Sets a fallback dict-like instance to be used to look up values after they are not found in this instance. """ self._fallback = fallback @property def primitive_value(self): """ Returns a primitive Python value without any formatting or markup metadata. """ return { key: value.primitive_value if hasattr(value, 'primitive_value') else value for key, value in self.items() } PK!BN&poetry/toml/prettify/elements/array.pyfrom . import factory, traversal from .common import Element, ContainerElement from .factory import create_element from .metadata import NewlineElement from .errors import InvalidElementError class ArrayElement(ContainerElement, traversal.TraversalMixin, list): """ A sequence-like container element containing other atomic elements or other containers. Implements list-like interface. Assumes input sub_elements are correct for an array element. Raises an InvalidElementError if contains heterogeneous values. """ def __init__(self, sub_elements): super(ArrayElement, self).__init__(sub_elements) self._check_homogeneity() def _check_homogeneity(self): if len(set(type(v) for v in self.primitive_value)) > 1: raise InvalidElementError('Array should be homogeneous') def __len__(self): return len(tuple(self._enumerate_non_metadata_sub_elements())) def __getitem__(self, i): """ Returns the ith entry, which can be a primitive value, a seq-lie, or a dict-like object. 
""" return self._find_value(i)[1].value def __setitem__(self, i, value): value_i, _ = self._find_value(i) new_element = value if isinstance(value, Element) else factory.create_element(value) self._sub_elements = self.sub_elements[:value_i] + [new_element] + self.sub_elements[value_i+1:] @property def value(self): return self # self is a sequence-like value @property def primitive_value(self): """ Returns a primitive Python value without any formatting or markup metadata. """ return list( self[i].primitive_value if hasattr(self[i], 'primitive_value') else self[i] for i in range(len(self))) def __str__(self): return "{}".format(self.primitive_value) def __repr__(self): return "Array{}".format(str(self)) def append(self, v): new_entry = [create_element(v)] if self: # If not empty, we need a comma and whitespace prefix! new_entry = [ factory.create_operator_element(','), factory.create_whitespace_element(), ] + new_entry insertion_index = self._find_closing_square_bracket() self._sub_elements = self._sub_elements[:insertion_index] + new_entry + \ self._sub_elements[insertion_index:] def _find_value(self, i): """ Returns (value_index, value) of ith value in this sequence. Raises IndexError if not found. """ return tuple(self._enumerate_non_metadata_sub_elements())[i] def __delitem__(self, i): value_i, value = self._find_value(i) begin, end = value_i, value_i+1 # Rules: # 1. begin should be index to the preceding comma to the value # 2. end should be index to the following comma, or the closing bracket # 3. If no preceding comma found but following comma found then end should be the index of the following value preceding_comma = self._find_preceding_comma(value_i) found_preceding_comma = preceding_comma >= 0 if found_preceding_comma: begin = preceding_comma following_comma = self._find_following_comma(value_i) if following_comma >= 0: if not found_preceding_comma: end = self._find_following_non_metadata(following_comma) else: end = following_comma else: end = self._find_following_closing_square_bracket(0) self._sub_elements = self.sub_elements[:begin] + self._sub_elements[end:] @property def is_multiline(self): return any(isinstance(e, (NewlineElement)) for e in self.elements) def turn_into_multiline(self): """ Turns this array into a multi-line array with each element lying on its own line. """ if self.is_multiline: return i = self._find_following_comma(-1) def next_entry_i(): return self._find_following_non_metadata(i) def next_newline_i(): return self._find_following_newline(i) def next_closing_bracket_i(): return self._find_following_closing_square_bracket(i) def next_comma_i(): return self._find_following_comma(i) while i < len(self.elements)-1: if next_newline_i() < next_entry_i(): self.elements.insert(i+1, factory.create_newline_element()) if float('-inf') < next_comma_i() < next_closing_bracket_i(): i = next_comma_i() else: i = next_closing_bracket_i() PK!='poetry/toml/prettify/elements/atomic.pyfrom ..tokens import py2toml, toml2py from ..util import is_dict_like, is_sequence_like from . import common from .errors import InvalidElementError class AtomicElement(common.TokenElement): """ An element containing a sequence of tokens representing a single atomic value that can be updated in place. Raises: InvalidElementError: when passed an invalid sequence of tokens. 
""" def __init__(self, _tokens): common.TokenElement.__init__(self, _tokens, common.TYPE_ATOMIC) def _validate_tokens(self, _tokens): if len([token for token in _tokens if not token.type.is_metadata]) != 1: raise InvalidElementError('Tokens making up an AtomicElement must contain only one non-metadata token') def serialized(self): return ''.join(token.source_substring for token in self.tokens) def _value_token_index(self): """ Finds the token where the value is stored. """ # TODO: memoize this value for i, token in enumerate(self.tokens): if not token.type.is_metadata: return i raise RuntimeError('could not find a value token') @property def value(self): """ Returns a Python value contained in this atomic element. """ return toml2py.deserialize(self._tokens[self._value_token_index()]) @property def primitive_value(self): return self.value def set(self, value): """ Sets the contained value to the given one. """ assert (not is_sequence_like(value)) and (not is_dict_like(value)), 'the value must be an atomic primitive' token_index = self._value_token_index() self._tokens[token_index] = py2toml.create_primitive_token(value) PK!/  'poetry/toml/prettify/elements/common.pyfrom abc import abstractmethod TYPE_METADATA = 'element-metadata' TYPE_ATOMIC = 'element-atomic' TYPE_CONTAINER = 'element-container' TYPE_MARKUP = 'element-markup' class Element: """ An Element: - is one or more Token instances, or one or more other Element instances. Not both. - knows how to serialize its value back to valid TOML code. A non-metadata Element is an Element that: - knows how to deserialize its content into usable Python primitive, seq-like, or dict-like value. - knows how to update its content from a Python primitive, seq-like, or dict-like value while maintaining its formatting. """ def __init__(self, _type): self._type = _type @property def type(self): return self._type @abstractmethod def serialized(self): """ TOML serialization of this element as str. """ raise NotImplementedError class TokenElement(Element): """ An Element made up of tokens """ def __init__(self, _tokens, _type): Element.__init__(self, _type) self._validate_tokens(_tokens) self._tokens = list(_tokens) @property def tokens(self): return self._tokens @property def first_token(self): return self._tokens[0] @abstractmethod def _validate_tokens(self, _tokens): raise NotImplementedError def serialized(self): return ''.join(token.source_substring for token in self._tokens) def __repr__(self): return repr(self.tokens) @property def primitive_value(self): """ Returns a primitive Python value without any formatting or markup metadata. """ raise NotImplementedError class ContainerElement(Element): """ An Element containing exclusively other elements. """ def __init__(self, sub_elements): Element.__init__(self, TYPE_CONTAINER) self._sub_elements = list(sub_elements) @property def sub_elements(self): return self._sub_elements @property def elements(self): return self.sub_elements def serialized(self): return ''.join(element.serialized() for element in self.sub_elements) def __eq__(self, other): return self.primitive_value == other def __repr__(self): return repr(self.primitive_value) @property def primitive_value(self): """ Returns a primitive Python value without any formatting or markup metadata. """ raise NotImplementedError PK!Vػbb'poetry/toml/prettify/elements/errors.py class InvalidElementError(Exception): """ Raised by Element factories when the given sequence of tokens or sub-elements are invalid for the specific type of Element being created. 
""" def __init__(self, message): self.message = message def __repr__(self): return "InvalidElementError: {}".format(self.message) PK!AL(poetry/toml/prettify/elements/factory.pyimport datetime import six from .. import tokens from ..tokens import py2toml from ..util import join_with from .atomic import AtomicElement from .metadata import PunctuationElement, WhitespaceElement, NewlineElement from .tableheader import TableHeaderElement def create_element(value, multiline_strings_allowed=True): """ Creates and returns the appropriate elements.Element instance from the given Python primitive, sequence-like, or dict-like value. """ from .array import ArrayElement if isinstance(value, (int, float, bool, datetime.datetime, datetime.date) + six.string_types) or value is None: primitive_token = py2toml.create_primitive_token(value, multiline_strings_allowed=multiline_strings_allowed) return AtomicElement((primitive_token,)) elif isinstance(value, (list, tuple)): preamble = [create_operator_element('[')] postable = [create_operator_element(']')] stuffing_elements = [create_element(v) for v in value] spaced_stuffing = join_with(stuffing_elements, separator=[create_operator_element(','), create_whitespace_element()]) return ArrayElement(preamble + spaced_stuffing + postable) elif isinstance(value, dict): return create_inline_table(value, multiline_table=False, multiline_strings_allowed=multiline_strings_allowed) else: raise RuntimeError('Value type unaccounted for: {} of type {}'.format(value, type(value))) def create_inline_table(from_dict, multiline_table=False, multiline_strings_allowed=True): """ Creates an InlineTable element from the given dict instance. """ from .inlinetable import InlineTableElement preamble = [create_operator_element('{')] postable = [create_operator_element('}')] stuffing_elements = ( ( create_string_element(k, bare_allowed=True), create_whitespace_element(), create_operator_element('='), create_whitespace_element(), create_element(v, multiline_strings_allowed=False) ) for (k, v) in from_dict.items()) pair_separator = [create_operator_element(','), create_newline_element() if multiline_table else create_whitespace_element()] spaced_elements = join_with(stuffing_elements, separator=pair_separator) return InlineTableElement(preamble + spaced_elements + postable) def create_string_element(value, bare_allowed=False): """ Creates and returns an AtomicElement wrapping a string value. """ return AtomicElement((py2toml.create_string_token(value, bare_allowed),)) def create_operator_element(operator): """ Creates a PunctuationElement instance containing an operator token of the specified type. The operator should be a TOML source str. """ operator_type_map = { ',': tokens.TYPE_OP_COMMA, '=': tokens.TYPE_OP_ASSIGNMENT, '[': tokens.TYPE_OP_SQUARE_LEFT_BRACKET, ']': tokens.TYPE_OP_SQUARE_RIGHT_BRACKET, '[[': tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET, ']]': tokens.TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET, '{': tokens.TYPE_OP_CURLY_LEFT_BRACKET, '}': tokens.TYPE_OP_CURLY_RIGHT_BRACKET, } ts = (tokens.Token(operator_type_map[operator], operator),) return PunctuationElement(ts) def create_newline_element(): """ Creates and returns a single NewlineElement. """ ts = (tokens.Token(tokens.TYPE_NEWLINE, '\n'),) return NewlineElement(ts) def create_whitespace_element(length=1, char=' '): """ Creates and returns a WhitespaceElement containing spaces. 
""" ts = (tokens.Token(tokens.TYPE_WHITESPACE, char),) * length return WhitespaceElement(ts) def create_table_header_element(names): name_elements = [] if isinstance(names, six.string_types): name_elements = [py2toml.create_string_token(names, bare_string_allowed=True)] else: for (i, name) in enumerate(names): name_elements.append(py2toml.create_string_token(name, bare_string_allowed=True)) if i < (len(names)-1): name_elements.append(py2toml.operator_token(tokens.TYPE_OPT_DOT)) return TableHeaderElement( [py2toml.operator_token(tokens.TYPE_OP_SQUARE_LEFT_BRACKET)] + name_elements + [py2toml.operator_token(tokens.TYPE_OP_SQUARE_RIGHT_BRACKET), py2toml.operator_token(tokens.TYPE_NEWLINE)], ) def create_array_of_tables_header_element(name): return TableHeaderElement(( py2toml.operator_token(tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET), py2toml.create_string_token(name, bare_string_allowed=True), py2toml.operator_token(tokens.TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET), py2toml.operator_token(tokens.TYPE_NEWLINE), )) def create_table(dict_value): """ Creates a TableElement out of a dict instance. """ from .table import TableElement if not isinstance(dict_value, dict): raise ValueError('input must be a dict instance.') table_element = TableElement([create_newline_element()]) for k, v in dict_value.items(): table_element[k] = create_element(v) return table_element def create_multiline_string(text, maximum_line_length): return AtomicElement(_tokens=[py2toml.create_multiline_string(text, maximum_line_length)]) PK!- - ,poetry/toml/prettify/elements/inlinetable.pyfrom . import factory, abstracttable from .common import Element class InlineTableElement(abstracttable.AbstractTable): """ An Element containing key-value pairs, representing an inline table. Implements dict-like interface. Assumes input sub_elements are correct for an inline table element. """ def __init__(self, sub_elements): abstracttable.AbstractTable.__init__(self, sub_elements) def __setitem__(self, key, value): new_element = value if isinstance(value, Element) else factory.create_element(value) try: key_i, value_i = self._find_key_and_value(key) # Found, then replace the value element with a new one self._sub_elements = self.sub_elements[:value_i] + [new_element] + self.sub_elements[value_i+1:] except KeyError: # Key does not exist, adding anew! new_entry = [ factory.create_string_element(key, bare_allowed=True), factory.create_whitespace_element(), factory.create_operator_element('='), factory.create_whitespace_element(), new_element, ] if self: # If not empty new_entry = [ factory.create_operator_element(','), factory.create_whitespace_element(), ] + new_entry insertion_index = self._find_closing_curly_bracket() self._sub_elements = self.sub_elements[:insertion_index] + new_entry + self.sub_elements[insertion_index:] def __delitem__(self, key): key_i, value_i = self._find_key_and_value(key) begin, end = key_i, value_i+1 # Rules: # 1. begin should be index to the preceding comma to the key # 2. end should be index to the following comma, or the closing bracket # 3. 
If no preceding comma found but following comma found then end should be the index of the following key preceding_comma = self._find_preceding_comma(begin) found_preceding_comma = preceding_comma >= 0 if found_preceding_comma: begin = preceding_comma following_comma = self._find_following_comma(value_i) if following_comma >= 0: if not found_preceding_comma: end = self._find_following_non_metadata(following_comma) else: end = following_comma else: end = self._find_closing_curly_bracket() self._sub_elements = self.sub_elements[:begin] + self.sub_elements[end:] def multiline_equivalent(self): return factory.create_inline_table(self.primitive_value, multiline_table=True, multiline_strings_allowed=True) @property def value(self): return self # self is a dict-like value that is perfectly usable PK!(6 )poetry/toml/prettify/elements/metadata.pyfrom .. import tokens from . import common from .errors import InvalidElementError class WhitespaceElement(common.TokenElement): """ An element that contains tokens of whitespace """ def __init__(self, _tokens): common.TokenElement.__init__(self, _tokens, common.TYPE_METADATA) def _validate_tokens(self, _tokens): for token in _tokens: if token.type != tokens.TYPE_WHITESPACE: raise InvalidElementError('Tokens making up a WhitespaceElement must all be whitespace') @property def length(self): """ The whitespace length of this element """ return len(self.tokens) class NewlineElement(common.TokenElement): """ An element containing newline tokens Raises: InvalidElementError: when passed an invalid sequence of tokens. """ def __init__(self, _tokens): common.TokenElement.__init__(self, _tokens, common.TYPE_METADATA) def _validate_tokens(self, _tokens): for token in _tokens: if token.type != tokens.TYPE_NEWLINE: raise InvalidElementError('Tokens making a NewlineElement must all be newlines') class CommentElement(common.TokenElement): """ An element containing a single comment token followed by a newline. Raises: InvalidElementError: when passed an invalid sequence of tokens. """ def __init__(self, _tokens): common.TokenElement.__init__(self, _tokens, common.TYPE_METADATA) def _validate_tokens(self, _tokens): if len(_tokens) != 2 or _tokens[0].type != tokens.TYPE_COMMENT or _tokens[1].type != tokens.TYPE_NEWLINE: raise InvalidElementError('CommentElement needs one comment token followed by one newline token') class PunctuationElement(common.TokenElement): """ An element containing a single punctuation token. Raises: InvalidElementError: when passed an invalid sequence of tokens. """ def __init__(self, _tokens): common.TokenElement.__init__(self, _tokens, common.TYPE_METADATA) @property def token(self): """ Returns the token contained in this Element. """ return self.tokens[0] def _validate_tokens(self, _tokens): if not _tokens or not tokens.is_operator(_tokens[0]): raise InvalidElementError('PunctuationElement must be made of only a single operator token') PK!DC(&poetry/toml/prettify/elements/table.pyfrom . import abstracttable, common, factory from .errors import InvalidElementError from .common import Element from .metadata import CommentElement, NewlineElement, WhitespaceElement class TableElement(abstracttable.AbstractTable): """ An Element containing an unnamed top-level table. Implements dict-like interface. Assumes input sub_elements are correct. Raises InvalidElementError on duplicate keys. 
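Example (illustrative, assuming `table` is the TableElement of a parsed section):

    table['name'] = 'poetry'  # updates the value in place, or inserts a new line
    del table['legacy']       # removes the whole key-value line
    table.primitive_value     # -> plain dict of primitive values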
""" def __init__(self, sub_elements): abstracttable.AbstractTable.__init__(self, sub_elements) self._check_for_duplicate_keys() def _check_for_duplicate_keys(self): if len(set(self.keys())) < len(self.keys()): raise InvalidElementError('Duplicate keys found') def __setitem__(self, key, value): if key in self: self._update(key, value) else: self._insert(key, value) def _update(self, key, value): _, value_i = self._find_key_and_value(key) self._sub_elements[value_i] = value if isinstance(value, Element) else factory.create_element(value) def _find_insertion_index(self): """ Returns the self.sub_elements index in which new entries should be inserted. """ non_metadata_elements = tuple(self._enumerate_non_metadata_sub_elements()) if not non_metadata_elements: return 0 last_entry_i = non_metadata_elements[-1][0] following_newline_i = self._find_following_line_terminator(last_entry_i) return following_newline_i + 1 def _detect_indentation_size(self): """ Detects the level of indentation used in this table. """ def lines(): # Returns a sequence of sequences of elements belonging to each line start = 0 for i, element in enumerate(self.elements): if isinstance(element, (CommentElement, NewlineElement)): yield self.elements[start:i+1] start = i+1 def indentation(line): # Counts the number of whitespace tokens at the beginning of this line try: first_non_whitespace_i = next(i for (i, e) in enumerate(line) if not isinstance(e, WhitespaceElement)) return sum(space.length for space in line[:first_non_whitespace_i]) except StopIteration: return 0 def is_empty_line(line): return all(e.type == common.TYPE_METADATA for e in line) try: return min(indentation(line) for line in lines() if len(line) > 1 and not is_empty_line(line)) except ValueError: # Raised by ValueError when no matching lines found return 0 def _insert(self, key, value): value_element = value if isinstance(value, Element) else factory.create_element(value) indentation_size = self._detect_indentation_size() indentation = [factory.create_whitespace_element(self._detect_indentation_size())] if indentation_size else [] inserted_elements = indentation + [ factory.create_string_element(key, bare_allowed=True), factory.create_whitespace_element(), factory.create_operator_element('='), factory.create_whitespace_element(), value_element, factory.create_newline_element(), ] insertion_index = self._find_insertion_index() self._sub_elements = \ self.sub_elements[:insertion_index] + inserted_elements + self.sub_elements[insertion_index:] def __delitem__(self, key): begin, _ = self._find_key_and_value(key) preceding_newline = self._find_preceding_newline(begin) if preceding_newline >= 0: begin = preceding_newline end = self._find_following_newline(begin) if end < 0: end = len(tuple(self._sub_elements)) self._sub_elements = self.sub_elements[:begin] + self.sub_elements[end:] @property def value(self): return self def __eq__(self, other): return self.primitive_value == other def __iter__(self): return iter(self.keys()) def __str__(self): return str(self.primitive_value) PK!oR R ,poetry/toml/prettify/elements/tableheader.pyfrom .. import tokens from ..tokens import toml2py from . 
import common from .common import TokenElement from .errors import InvalidElementError _opening_bracket_types = (tokens.TYPE_OP_SQUARE_LEFT_BRACKET, tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET) _closing_bracket_types = (tokens.TYPE_OP_SQUARE_RIGHT_BRACKET, tokens.TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET) _name_types = ( tokens.TYPE_BARE_STRING, tokens.TYPE_LITERAL_STRING, tokens.TYPE_STRING, ) class TableHeaderElement(TokenElement): """ An element containing opening and closing single and double square brackets, strings and dots and ending with a newline. Raises InvalidElementError. """ def __init__(self, _tokens): TokenElement.__init__(self, _tokens, common.TYPE_MARKUP) self._names = tuple(toml2py.deserialize(token) for token in self._tokens if token.type in _name_types) @property def is_array_of_tables(self): opening_bracket = next(token for i, token in enumerate(self._tokens) if token.type in _opening_bracket_types) return opening_bracket.type == tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET @property def names(self): """ Returns a sequence of string names making up this table header name. """ return self._names def has_name_prefix(self, names): """ Returns True if the header names is prefixed by the given sequence of names. """ for i, name in enumerate(names): if self.names[i] != name: return False return True def serialized(self): return ''.join(token.source_substring for token in self._tokens) def is_named(self, names): """ Returns True if the given name sequence matches the full name of this header. """ return tuple(names) == self.names def _validate_tokens(self, _tokens): opening_bracket_i = next((i for i, token in enumerate(_tokens) if token.type in _opening_bracket_types), float('-inf')) if opening_bracket_i < 0: raise InvalidElementError('Expected an opening bracket') _tokens = _tokens[opening_bracket_i+1:] first_name_i = next((i for i, token in enumerate(_tokens) if token.type in _name_types), float('-inf')) if first_name_i < 0: raise InvalidElementError('Expected a table header name') _tokens = _tokens[first_name_i+1:] while True: next_dot_i = next((i for i, token in enumerate(_tokens) if token.type == tokens.TYPE_OPT_DOT), float('-inf')) if next_dot_i < 0: break _tokens = _tokens[next_dot_i+1:] next_name_i = next((i for i, token in enumerate(_tokens) if token.type in _name_types), float('-inf')) if next_name_i < 0: raise InvalidElementError('Expected a name after the dot') _tokens = _tokens[next_name_i+1:] closing_bracket_i = next((i for i, token in enumerate(_tokens) if token.type in _closing_bracket_types), float('-inf')) if closing_bracket_i < 0: raise InvalidElementError('Expected a closing bracket') if _tokens[-1].type != tokens.TYPE_NEWLINE: raise InvalidElementError('Must end with a newline') PK!2'663poetry/toml/prettify/elements/traversal/__init__.pyfrom ...tokens import TYPE_OP_COMMA from ...tokens import TYPE_OP_CURLY_RIGHT_BRACKET from ..common import TYPE_METADATA from ..metadata import PunctuationElement, NewlineElement from . import predicates class TraversalMixin: """ A mix-in that provides convenient sub-element traversal to any class with an `elements` member that is a sequence of Element instances """ def __find_following_element(self, index, predicate): """ Finds and returns the index of element in self.elements that evaluates the given predicate to True and whose index is higher than the given index, or returns -Infinity on failure. 
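Note (illustrative): since the failure sentinel is float('-inf'), callers can
test for success with a plain comparison:

    i = self.__find_following_element(index, predicate)
    if i < 0:  # not found
        ...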
""" return find_following(self.elements, predicate, index) def __find_preceding_element(self, index, predicate): """ Finds and returns the index of the element in self.elements that evaluates the given predicate to True and whose index is lower than the given index. """ i = find_previous(self.elements, predicate, index) if i == float('inf'): return float('-inf') return i def __must_find_following_element(self, predicate): """ Finds and returns the index to the element in self.elements that evaluatest the predicate to True, or raises an error. """ i = self.__find_following_element(-1, predicate) if i < 0: raise RuntimeError('Could not find non-optional element') return i def _enumerate_non_metadata_sub_elements(self): """ Returns a sequence of of (index, sub_element) of the non-metadata sub-elements. """ return ((i, element) for i, element in enumerate(self.elements) if element.type != TYPE_METADATA) def _find_preceding_comma(self, index): """ Returns the index of the preceding comma element to the given index, or -Infinity. """ return self.__find_preceding_element(index, predicates.op_comma) def _find_following_comma(self, index): """ Returns the index of the following comma element after the given index, or -Infinity. """ def predicate(element): return isinstance(element, PunctuationElement) and element.token.type == TYPE_OP_COMMA return self.__find_following_element(index, predicate) def _find_following_newline(self, index): """ Returns the index of the following newline element after the given index, or -Infinity. """ return self.__find_following_element(index, lambda e: isinstance(e, NewlineElement)) def _find_following_comment(self, index): """ Returns the index of the following comment element after the given index, or -Infinity. """ return self.__find_following_element(index, predicates.comment) def _find_following_line_terminator(self, index): """ Returns the index of the following comment or newline element after the given index, or -Infinity. """ following_comment = self._find_following_comment(index) following_newline = self._find_following_newline(index) if following_comment == float('-inf'): return following_newline if following_newline == float('inf'): return following_comment if following_newline < following_comment: return following_newline else: return following_comment def _find_preceding_newline(self, index): """ Returns the index of the preceding newline element to the given index, or -Infinity. """ return self.__find_preceding_element(index, predicates.newline) def _find_following_non_metadata(self, index): """ Returns the index to the following non-metadata element after the given index, or -Infinity. """ return self.__find_following_element(index, predicates.non_metadata) def _find_closing_square_bracket(self): """ Returns the index to the closing square bracket, or raises an Error. """ return self.__must_find_following_element(predicates.closing_square_bracket) def _find_following_opening_square_bracket(self, index): """ Returns the index to the opening square bracket, or -Infinity. """ return self.__find_following_element(index, predicates.opening_square_bracket) def _find_following_closing_square_bracket(self, index): """ Returns the index to the closing square bracket, or -Infinity. """ return self.__find_following_element(index, predicates.closing_square_bracket) def _find_following_table(self, index): """ Returns the index to the next TableElement after the specified index, or -Infinity. 
""" return self.__find_following_element(index, predicates.table) def _find_preceding_table(self, index): """ Returns the index to the preceding TableElement to the specified index, or -Infinity. """ return self.__find_preceding_element(index,predicates.table) def _find_closing_curly_bracket(self): """ Returns the index to the closing curly bracket, or raises an Error. """ def predicate(element): return isinstance(element, PunctuationElement) and element.token.type == TYPE_OP_CURLY_RIGHT_BRACKET return self.__must_find_following_element(predicate) def _find_following_table_header(self, index): """ Returns the index to the table header after the given element index, or -Infinity. """ return self.__find_following_element(index, predicates.table_header) def find_following(element_seq, predicate, index=None): """ Finds and returns the index of the next element fulfilling the specified predicate after the specified index, or -Infinity. Starts searching linearly from the start_from index. """ if isinstance(index, (int, float)) and index < 0: index = None for i, element in tuple(enumerate(element_seq))[index+1 if index is not None else index:]: if predicate(element): return i return float('-inf') def find_previous(element_seq, predicate, index=None): """ Finds and returns the index of the previous element fulfilling the specified predicate preceding to the specified index, or Infinity. """ if isinstance(index, (int, float)) and index >= len(element_seq): index = None for i, element in reversed(tuple(enumerate(element_seq))[:index]): if predicate(element): return i return float('inf') PK!$#X<<5poetry/toml/prettify/elements/traversal/predicates.py """ The following predicates can be used in the traversal functions directly. """ from ...tokens import TYPE_OP_ASSIGNMENT from ...tokens import TYPE_OP_COMMA from ...tokens import TYPE_OP_SQUARE_LEFT_BRACKET from ...tokens import TYPE_OP_SQUARE_RIGHT_BRACKET from ..atomic import AtomicElement from ..metadata import PunctuationElement, CommentElement, NewlineElement, WhitespaceElement from .. import common atomic = lambda e: isinstance(e, AtomicElement) op_assignment = lambda e: isinstance(e, PunctuationElement) and e.token.type == TYPE_OP_ASSIGNMENT op_comma = lambda e: isinstance(e, PunctuationElement) and e.token.type == TYPE_OP_COMMA comment = lambda e: isinstance(e, CommentElement) newline = lambda e: isinstance(e, NewlineElement) non_metadata = lambda e: e.type != common.TYPE_METADATA closing_square_bracket = \ lambda e: isinstance(e, PunctuationElement) and e.token.type == TYPE_OP_SQUARE_RIGHT_BRACKET opening_square_bracket = \ lambda e: isinstance(e, PunctuationElement) and e.token.type == TYPE_OP_SQUARE_LEFT_BRACKET def table(e): from ..table import TableElement return isinstance(e, TableElement) def table_header(e): from ..tableheader import TableHeaderElement return isinstance(e, TableHeaderElement) whitespace = lambda e: isinstance(e, WhitespaceElement) PK!/|!!poetry/toml/prettify/errors.py class TOMLError(Exception): """ All errors raised by this module are descendants of this type. """ class InvalidTOMLFileError(TOMLError): pass class NoArrayFoundError(TOMLError): """ An array of tables was requested but none exist by the given name. """ class InvalidValueError(TOMLError): pass class DuplicateKeysError(TOMLError): """ Duplicate keys detected in the parsed file. """ class DuplicateTablesError(TOMLError): """ Duplicate tables detected in the parsed file. """ PK! 
S&poetry/toml/prettify/lexer/__init__.py """ A regular expression based Lexer/tokenizer for TOML. """ from collections import namedtuple import re from .. import tokens from ..errors import TOMLError TokenSpec = namedtuple('TokenSpec', ('type', 're')) # Specs of all the valid tokens _LEXICAL_SPECS = ( TokenSpec(tokens.TYPE_COMMENT, re.compile(r'^(#.*)\n')), TokenSpec(tokens.TYPE_STRING, re.compile(r'^("(([^"]|\\")+?[^\\]|([^"]|\\")|)")')), # Single line only TokenSpec(tokens.TYPE_MULTILINE_STRING, re.compile(r'^(""".*?""")', re.DOTALL)), TokenSpec(tokens.TYPE_LITERAL_STRING, re.compile(r"^('.*?')")), TokenSpec(tokens.TYPE_MULTILINE_LITERAL_STRING, re.compile(r"^('''.*?''')", re.DOTALL)), TokenSpec(tokens.TYPE_BARE_STRING, re.compile(r'^([A-Za-z0-9_-]+)')), TokenSpec(tokens.TYPE_DATE, re.compile( r'^([0-9]{4}-[0-9]{2}-[0-9]{2}(T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]*)?)?(([zZ])|((\+|-)[0-9]{2}:[0-9]{2}))?)')), TokenSpec(tokens.TYPE_WHITESPACE, re.compile(r'^( |\t)', re.DOTALL)), TokenSpec(tokens.TYPE_INTEGER, re.compile(r'^(((\+|-)[0-9_]+)|([0-9][0-9_]*))')), TokenSpec(tokens.TYPE_FLOAT, re.compile(r'^((((\+|-)[0-9_]+)|([1-9][0-9_]*))(\.[0-9_]+)?([eE](\+|-)?[0-9_]+)?)')), TokenSpec(tokens.TYPE_BOOLEAN, re.compile(r'^(true|false)')), TokenSpec(tokens.TYPE_OP_SQUARE_LEFT_BRACKET, re.compile(r'^(\[)')), TokenSpec(tokens.TYPE_OP_SQUARE_RIGHT_BRACKET, re.compile(r'^(\])')), TokenSpec(tokens.TYPE_OP_CURLY_LEFT_BRACKET, re.compile(r'^(\{)')), TokenSpec(tokens.TYPE_OP_CURLY_RIGHT_BRACKET, re.compile(r'^(\})')), TokenSpec(tokens.TYPE_OP_ASSIGNMENT, re.compile(r'^(=)')), TokenSpec(tokens.TYPE_OP_COMMA, re.compile(r'^(,)')), TokenSpec(tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET, re.compile(r'^(\[\[)')), TokenSpec(tokens.TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET, re.compile(r'^(\]\])')), TokenSpec(tokens.TYPE_OPT_DOT, re.compile(r'^(\.)')), TokenSpec(tokens.TYPE_NEWLINE, re.compile('^(\n|\r\n)')), ) def _next_token_candidates(source): matches = [] for token_spec in _LEXICAL_SPECS: match = token_spec.re.search(source) if match: matches.append(tokens.Token(token_spec.type, match.group(1))) return matches def _choose_from_next_token_candidates(candidates): if len(candidates) == 1: return candidates[0] elif len(candidates) > 1: # Return the maximal-munch with ties broken by natural order of token type. maximal_munch_length = max(len(token.source_substring) for token in candidates) maximal_munches = [token for token in candidates if len(token.source_substring) == maximal_munch_length] return sorted(maximal_munches)[0] # Return the first in sorting by priority def _munch_a_token(source): """ Munches a single Token instance if it could recognize one at the beginning of the given source text, or None if no token type could be recognized. """ candidates = _next_token_candidates(source) return _choose_from_next_token_candidates(candidates) class LexerError(TOMLError): def __init__(self, message): self._message = message def __repr__(self): return self._message def __str__(self): return self._message def tokenize(source, is_top_level=False): """ Tokenizes the input TOML source into a stream of tokens. If is_top_level is set to True, will make sure that the input source has a trailing newline character before it is tokenized. Raises a LexerError when it fails recognize another token while not at the end of the source. """ # Newlines are going to be normalized to UNIX newlines. 
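# (Illustration: a Windows-style '\r\n' is folded into a single '\n' before any
# token regex runs, although the TYPE_NEWLINE spec still accepts both forms.)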
source = source.replace('\r\n', '\n') if is_top_level and source and source[-1] != '\n': source += '\n' next_row = 1 next_col = 1 next_index = 0 while next_index < len(source): new_token = _munch_a_token(source[next_index:]) if not new_token: raise LexerError("failed to read the next token at ({}, {}): {}".format( next_row, next_col, source[next_index:])) # Set the col and row on the new token new_token = tokens.Token(new_token.type, new_token.source_substring, next_col, next_row) # Advance the index, row and col count next_index += len(new_token.source_substring) for c in new_token.source_substring: if c == '\n': next_row += 1 next_col = 1 else: next_col += 1 yield new_token PK!"+'''poetry/toml/prettify/parser/__init__.py """ A parser for TOML tokens into TOML elements. """ from .elementsanitizer import sanitize from .errors import ParsingError from .parser import toml_file_elements from .tokenstream import TokenStream def parse_tokens(tokens): """ Parses the given token sequence into a sequence of top-level TOML elements. Raises ParsingError on invalid TOML input. """ return _parse_token_stream(TokenStream(tokens)) def _parse_token_stream(token_stream): """ Parses the given token_stream into a sequence of top-level TOML elements. Raises ParsingError on invalid TOML input. """ elements, pending = toml_file_elements(token_stream) if not pending.at_end: raise ParsingError('Failed to parse line {}'.format(pending.head.row)) return sanitize(elements) PK!0W/poetry/toml/prettify/parser/elementsanitizer.pyfrom ..elements import TYPE_METADATA from ..elements.table import TableElement from ..elements.tableheader import TableHeaderElement from ..errors import InvalidTOMLFileError from ..util import PeekableIterator def sanitize(_elements): """ Finds TableHeaderElement instances that are not followed by a TableElement body and inserts an empty TableElement right after those. """ output = list(_elements) def find_next_table_header(after=-1): return next((i for (i, element) in enumerate(output) if i > after and isinstance(element, TableHeaderElement)), float('-inf')) def find_next_table_body(after=-1): return next((i for (i, element) in enumerate(output) if i > after and isinstance(element, TableElement)), float('-inf')) next_table_header_i = find_next_table_header() while next_table_header_i >= 0: following_table_header_i = find_next_table_header(next_table_header_i) following_table_body_i = find_next_table_body(next_table_header_i) if (following_table_body_i < 0) or \ (following_table_header_i >= 0 and (following_table_header_i < following_table_body_i)): output.insert(next_table_header_i+1, TableElement(tuple())) next_table_header_i = find_next_table_header(next_table_header_i) return output def validate_sanitized(_elements): # Non-metadata elements must start with an optional TableElement, # followed by zero or more (TableHeaderElement, TableElement) pairs.
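# Illustration of the accepted shapes: (Table)? (Header Table)* -- any other
# ordering of non-metadata elements raises InvalidTOMLFileError below.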
if not _elements: return it = PeekableIterator(e for e in _elements if e.type != TYPE_METADATA) if isinstance(it.peek(), TableElement): it.next() while it.peek(): if not isinstance(it.peek(), TableHeaderElement): raise InvalidTOMLFileError it.next() if not isinstance(it.peek(), TableElement): raise InvalidTOMLFileError it.next() PK!%poetry/toml/prettify/parser/errors.pyfrom ..errors import TOMLError class ParsingError(TOMLError): def __init__(self, message='', token=None): self.message = message self.token = token def __repr__(self): if self.message and self.token: return "{} at row {} and col {}".format( self.message, self.token.row, self.token.col ) else: return self.message def __str__(self): return repr(self) PK!@(66%poetry/toml/prettify/parser/parser.py """ A Recursive Descent implementation of a lexical parser for TOML. Grammar: -------- Newline -> NEWLINE Comment -> COMMENT Newline LineTerminator -> Comment | Newline Space -> WHITESPACE Space | WHITESPACE | EMPTY TableHeader -> Space [ Space TableHeaderName Space ] Space LineTerminator | Space [[ Space TableHeaderName Space ]] Space LineTerminator TableHeaderName -> STRING Space '.' Space TableHeaderName | STRING Atomic -> STRING | INTEGER | FLOAT | DATE | BOOLEAN Array -> '[' Space ArrayInternal Space ']' | '[' Space ArrayInternal Space LineTerminator Space ']' ArrayInternal -> LineTerminator Space ArrayInternal | Value Space ',' Space LineTerminator Space ArrayInternal | Value Space ',' Space ArrayInternal | LineTerminator | Value | EMPTY InlineTable -> '{' Space InlineTableInternal Space '}' InlineTableKeyValuePair = STRING Space '=' Space Value InlineTableInternal -> InlineTableKeyValuePair Space ',' Space InlineTableInternal | InlineTableKeyValuePair | Empty Value -> Atomic | InlineTable | Array KeyValuePair -> Space STRING Space '=' Space Value Space LineTerminator TableBody -> KeyValuePair TableBody | EmptyLine TableBody | EmptyLine | KeyValuePair EmptyLine -> Space LineTerminator FileEntry -> TableHeader | TableBody TOMLFileElements -> FileEntry TOMLFileElements | FileEntry | EmptyLine | EMPTY """ from ..elements.array import ArrayElement from ..elements.atomic import AtomicElement from ..elements.inlinetable import InlineTableElement from ..elements.metadata import NewlineElement, CommentElement, WhitespaceElement, PunctuationElement from ..elements.table import TableElement from ..elements.tableheader import TableHeaderElement from ..tokens import TYPE_BARE_STRING from ..tokens import TYPE_BOOLEAN from ..tokens import TYPE_COMMENT from ..tokens import TYPE_DATE from ..tokens import TYPE_FLOAT from ..tokens import TYPE_INTEGER from ..tokens import TYPE_LITERAL_STRING from ..tokens import TYPE_MULTILINE_LITERAL_STRING from ..tokens import TYPE_MULTILINE_STRING from ..tokens import TYPE_NEWLINE from ..tokens import TYPE_OP_ASSIGNMENT from ..tokens import TYPE_OP_COMMA from ..tokens import TYPE_OP_CURLY_LEFT_BRACKET from ..tokens import TYPE_OP_CURLY_RIGHT_BRACKET from ..tokens import TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET from ..tokens import TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET from ..tokens import TYPE_OP_SQUARE_LEFT_BRACKET from ..tokens import TYPE_OP_SQUARE_RIGHT_BRACKET from ..tokens import TYPE_OPT_DOT from ..tokens import TYPE_STRING from ..tokens import TYPE_WHITESPACE from .recdesc import capture_from from .errors import ParsingError """ Non-terminals are represented as functions which return (RESULT, pending_token_stream), or raise ParsingError. 
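Illustrative sketch of the convention (hypothetical names `my_rule`,
`sub_rule_a` and `sub_rule_b`):

    def my_rule(token_stream):
        captured = capture_from(token_stream).find(sub_rule_a).and_find(sub_rule_b)
        return MyElement(captured.value()), captured.pending_tokens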
""" def token(token_type): def factory(ts): t = ts.head if t.type != token_type: raise ParsingError('Expected a token of type {}'.format(token_type)) return t, ts.tail return factory def newline_element(token_stream): """ Returns NewlineElement, pending_token_stream or raises ParsingError. """ captured = capture_from(token_stream).find(token(TYPE_NEWLINE)) return NewlineElement(captured.value()), captured.pending_tokens def comment_tokens(ts1): c1 = capture_from(ts1).find(token(TYPE_COMMENT)).and_find(token(TYPE_NEWLINE)) return c1.value(), c1.pending_tokens def comment_element(token_stream): """ Returns CommentElement, pending_token_stream or raises ParsingError. """ captured = capture_from(token_stream).find(comment_tokens) return CommentElement(captured.value()), captured.pending_tokens def line_terminator_tokens(token_stream): captured = capture_from(token_stream).find(comment_tokens).or_find(token(TYPE_NEWLINE)) return captured.value(), captured.pending_tokens def line_terminator_element(token_stream): captured = capture_from(token_stream).find(comment_element).or_find(newline_element) return captured.value('Expected a comment or a newline')[0], captured.pending_tokens def zero_or_more_tokens(token_type): def factory(token_stream): def more(ts): c = capture_from(ts).find(token(token_type)).and_find(zero_or_more_tokens(token_type)) return c.value(), c.pending_tokens def two(ts): c = capture_from(ts).find(token(TYPE_WHITESPACE)) return c.value(), c.pending def zero(ts): return tuple(), ts captured = capture_from(token_stream).find(more).or_find(two).or_find(zero) return captured.value(), captured.pending_tokens return factory def space_element(token_stream): captured = capture_from(token_stream).find(zero_or_more_tokens(TYPE_WHITESPACE)) return WhitespaceElement([t for t in captured.value() if t]), captured.pending_tokens def string_token(token_stream): captured = capture_from(token_stream).\ find(token(TYPE_BARE_STRING)).\ or_find(token(TYPE_STRING)).\ or_find(token(TYPE_LITERAL_STRING)).\ or_find(token(TYPE_MULTILINE_STRING)).\ or_find(token(TYPE_MULTILINE_LITERAL_STRING)) return captured.value('Expected a string'), captured.pending_tokens def string_element(token_stream): captured = capture_from(token_stream).find(string_token) return AtomicElement(captured.value()), captured.pending_tokens def table_header_name_tokens(token_stream): def one(ts): c = capture_from(ts).\ find(string_token).\ and_find(zero_or_more_tokens(TYPE_WHITESPACE)).\ and_find(token(TYPE_OPT_DOT)).\ and_find(zero_or_more_tokens(TYPE_WHITESPACE)).\ and_find(table_header_name_tokens) return c.value(), c.pending_tokens captured = capture_from(token_stream).find(one).or_find(string_token) return captured.value(), captured.pending_tokens def table_header_element(token_stream): def single(ts1): c1 = capture_from(ts1).\ find(zero_or_more_tokens(TYPE_WHITESPACE)).\ and_find(token(TYPE_OP_SQUARE_LEFT_BRACKET)).\ and_find(zero_or_more_tokens(TYPE_WHITESPACE)).\ and_find(table_header_name_tokens).\ and_find(zero_or_more_tokens(TYPE_WHITESPACE)).\ and_find(token(TYPE_OP_SQUARE_RIGHT_BRACKET)).\ and_find(zero_or_more_tokens(TYPE_WHITESPACE)).\ and_find(line_terminator_tokens) return c1.value(), c1.pending_tokens def double(ts2): c2 = capture_from(ts2).\ find(zero_or_more_tokens(TYPE_WHITESPACE)).\ and_find(token(TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET)).\ and_find(zero_or_more_tokens(TYPE_WHITESPACE)).\ and_find(table_header_name_tokens).\ and_find(zero_or_more_tokens(TYPE_WHITESPACE)).\ 
and_find(token(TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET)).\ and_find(zero_or_more_tokens(TYPE_WHITESPACE)).\ and_find(line_terminator_tokens) return c2.value(), c2.pending_tokens captured = capture_from(token_stream).find(single).or_find(double) return TableHeaderElement(captured.value()), captured.pending_tokens def atomic_element(token_stream): captured = capture_from(token_stream).\ find(string_token).\ or_find(token(TYPE_INTEGER)).\ or_find(token(TYPE_FLOAT)).\ or_find(token(TYPE_DATE)).\ or_find(token(TYPE_BOOLEAN)) return AtomicElement(captured.value('Expected an atomic primitive value')), captured.pending_tokens def punctuation_element(token_type): def factory(ts): c = capture_from(ts).find(token(token_type)) return PunctuationElement(c.value('Expected the punctuation element: {}'.format(token_type))), c.pending_tokens return factory def value(token_stream): captured = capture_from(token_stream).\ find(atomic_element).\ or_find(array_element).\ or_find(inline_table_element) return captured.value('Expected a primitive value, array or an inline table'), captured.pending_tokens def array_internal(ts): def zero(ts0): c = capture_from(ts0).\ and_find(line_terminator_element).\ and_find(space_element).\ and_find(array_internal) return c.value(), c.pending_tokens def one(ts1): c = capture_from(ts1).\ find(value).\ and_find(space_element).\ and_find(punctuation_element(TYPE_OP_COMMA)).\ and_find(space_element).\ and_find(line_terminator_element).\ and_find(space_element).\ and_find(array_internal) return c.value(), c.pending_tokens def two(ts2): c = capture_from(ts2).\ find(value).\ and_find(space_element).\ and_find(punctuation_element(TYPE_OP_COMMA)).\ and_find(space_element).\ and_find(array_internal) return c.value(), c.pending_tokens def three(ts3): c = capture_from(ts3).\ find(space_element).\ and_find(line_terminator_element) return c.value(), c.pending_tokens captured = capture_from(ts).find(zero).or_find(one).or_find(two).or_find(three).or_find(value).or_empty() return captured.value(), captured.pending_tokens def array_element(token_stream): def one(ts1): ca = capture_from(ts1).\ find(punctuation_element(TYPE_OP_SQUARE_LEFT_BRACKET)).\ and_find(space_element).\ and_find(array_internal).\ and_find(space_element).\ and_find(punctuation_element(TYPE_OP_SQUARE_RIGHT_BRACKET)) return ca.value(), ca.pending_tokens def two(ts2): ca = capture_from(ts2).\ find(punctuation_element(TYPE_OP_SQUARE_LEFT_BRACKET)).\ and_find(space_element).\ and_find(array_internal).\ and_find(space_element).\ and_find(line_terminator_element).\ and_find(space_element).\ and_find(punctuation_element(TYPE_OP_SQUARE_RIGHT_BRACKET)) return ca.value(), ca.pending_tokens captured = capture_from(token_stream).find(one).or_find(two) return ArrayElement(captured.value()), captured.pending_tokens def inline_table_element(token_stream): # InlineTableElement -> '{' Space InlineTableInternal Space '}' # InlineTableKeyValuePair = STRING Space '=' Space Value # InlineTableInternal -> InlineTableKeyValuePair Space ',' Space InlineTableInternal | # InlineTableKeyValuePair | Empty def key_value(ts): ca = capture_from(ts).\ find(string_element).\ and_find(space_element).\ and_find(punctuation_element(TYPE_OP_ASSIGNMENT)).\ and_find(space_element).\ and_find(value) return ca.value(), ca.pending_tokens def internal(ts): def one(ts1): c1 = capture_from(ts1).\ find(key_value).\ and_find(space_element).\ and_find(punctuation_element(TYPE_OP_COMMA)).\ and_find(space_element).\ and_find(internal) return c1.value(), c1.pending_tokens c = 
capture_from(ts).find(one).or_find(key_value).or_empty() return c.value(), c.pending_tokens captured = capture_from(token_stream).\ find(punctuation_element(TYPE_OP_CURLY_LEFT_BRACKET)).\ and_find(space_element).\ and_find(internal).\ and_find(space_element).\ and_find(punctuation_element(TYPE_OP_CURLY_RIGHT_BRACKET)) return InlineTableElement(captured.value()), captured.pending_tokens def key_value_pair(token_stream): captured = capture_from(token_stream).\ find(space_element).\ and_find(string_element).\ and_find(space_element).\ and_find(punctuation_element(TYPE_OP_ASSIGNMENT)).\ and_find(space_element).\ and_find(value).\ and_find(space_element).\ and_find(line_terminator_element) return captured.value(), captured.pending_tokens def table_body_elements(token_stream): # TableBody -> KeyValuePair TableBody | EmptyLine TableBody | EmptyLine | KeyValuePair def one(ts1): c = capture_from(ts1).\ find(key_value_pair).\ and_find(table_body_elements) return c.value(), c.pending_tokens def two(ts2): c = capture_from(ts2).\ find(empty_line_elements).\ and_find(table_body_elements) return c.value(), c.pending_tokens captured = capture_from(token_stream).\ find(one).\ or_find(two).\ or_find(empty_line_elements).\ or_find(key_value_pair) return captured.value(), captured.pending_tokens def table_body_element(token_stream): captured = capture_from(token_stream).find(table_body_elements) return TableElement(captured.value()), captured.pending_tokens def empty_line_tokens(ts1): c1 = capture_from(ts1).find(space_element).and_find(line_terminator_element) return c1.value(), c1.pending_tokens def empty_line_elements(token_stream): captured = capture_from(token_stream).find(empty_line_tokens) return captured.value(), captured.pending_tokens def file_entry_element(token_stream): captured = capture_from(token_stream).find(table_header_element).\ or_find(table_body_element) return captured.value(), captured.pending_tokens def toml_file_elements(token_stream): def one(ts1): c1 = capture_from(ts1).find(file_entry_element).and_find(toml_file_elements) return c1.value(), c1.pending_tokens captured = capture_from(token_stream).find(one).or_find(file_entry_element).or_empty() return captured.value(), captured.pending_tokens PK!^/**&poetry/toml/prettify/parser/recdesc.pyfrom ..elements.array import ArrayElement from .errors import ParsingError from .tokenstream import TokenStream class Capturer: """ Recursive-descent matching DSL. Yeah.. """ def __init__(self, token_stream, value=tuple(), dormant_error=None): self._token_stream = token_stream self._value = value self._dormant_error = dormant_error def find(self, finder): """ Searches the token stream using the given finder. `finder(ts)` is a function that accepts a `TokenStream` instance and returns `(element, pending_ts)` where `element` is the found "something" or a sequence of "somethings", and `pending_ts` the unconsumed `TokenStream`. `finder(ts)` can raise `ParsingError` to indicate that it couldn't find anything, or a `TokenStream.EndOfStream` to indicate a premature end of the TokenStream. This method returns a Capturer instance that can be further used to find more and more "somethings". The value at any given moment can be retrieved via the `Capturer.value()` method. """ try: # Execute finder! 
element, pending_ts = finder(self._token_stream) # If result is not a sequence, make it so if isinstance(element, ArrayElement) or not isinstance(element, (tuple, list)): element = (element,) # Return a Capturer with accumulated findings return Capturer(pending_ts, value=self.value() + element) except ParsingError as e: # Failed to find, store error in returned value return Capturer(self._token_stream, dormant_error=e) except TokenStream.EndOfStream as e: # Premature end of stream, store error in returned value return Capturer(self._token_stream, dormant_error=e) def value(self, parsing_expectation_msg=None): """ Returns the accumulated values found as a sequence of values, or raises an encountered dormant error. If parsing_expectation_msg is specified and a dormant_error is a ParsingError, the expectation message is used instead in it. """ if self._dormant_error: if parsing_expectation_msg and isinstance(self._dormant_error, ParsingError): raise ParsingError(parsing_expectation_msg, token=self._token_stream.head) else: raise self._dormant_error return self._value @property def pending_tokens(self): """ Returns a TokenStream with the pending tokens yet to be processed. """ return self._token_stream def or_find(self, finder): """ If a dormant_error is present, try this new finder instead. If not, does nothing. """ if self._dormant_error: return Capturer(self._token_stream).find(finder) else: return self def or_end_of_file(self): """ Discards any errors if at end of the stream. """ if isinstance(self._dormant_error, TokenStream.EndOfStream): return Capturer(self.pending_tokens, value=self._value) else: return self def or_empty(self): """ Discards any previously-encountered dormant error. """ if self._dormant_error: return Capturer(self.pending_tokens, value=self._value) else: return self def and_find(self, finder): """ Accumulate new "somethings" to the stored value using the given finder. """ if self._dormant_error: return Capturer(self.pending_tokens, dormant_error=self._dormant_error) return Capturer(self.pending_tokens, self.value()).find(finder) def capture_from(token_stream): return Capturer(token_stream) PK!AUU*poetry/toml/prettify/parser/tokenstream.pyclass TokenStream: """ An immutable subset of a token sequence """ class EndOfStream(Exception): pass Nothing = tuple() def __init__(self, _tokens, offset=0): if isinstance(_tokens, tuple): self._tokens = _tokens else: self._tokens = tuple(_tokens) self._head_index = offset def __len__(self): return len(self._tokens) - self.offset @property def head(self): try: return self._tokens[self._head_index] except IndexError: raise TokenStream.EndOfStream @property def tail(self): return TokenStream(self._tokens, offset=self._head_index+1) @property def offset(self): return self._head_index @property def at_end(self): return self.offset >= len(self._tokens) PK!amm'poetry/toml/prettify/tokens/__init__.py """ TOML lexical tokens. """ class TokenType: """ A TokenType is a concrete type of a source token along with a defined priority and a higher-order kind. The priority will be used in determining the tokenization behaviour of the lexer in the following manner: whenever more than one token is recognizable as the next possible token and they are all of equal source length, this priority is going to be used to break the tie by favoring the token type of the lowest priority value. A TokenType instance is naturally ordered by its priority. 
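Example (illustrative): the source text 'true' is a maximal munch for both
TYPE_BOOLEAN (priority 0) and TYPE_BARE_STRING (priority 50); the munches have
equal length, so the tie is broken in favour of the boolean:

    TYPE_BOOLEAN < TYPE_BARE_STRING  # True, so 'true' lexes as a boolean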
""" def __init__(self, name, priority, is_metadata): self._priority = priority self._name = name self._is_metadata = is_metadata @property def is_metadata(self): return self._is_metadata @property def priority(self): return self._priority def __repr__(self): return "{}-{}".format(self.priority, self._name) def __lt__(self, other): return isinstance(other, TokenType) and self._priority < other.priority # Possible types of tokens TYPE_BOOLEAN = TokenType('boolean', 0, is_metadata=False) TYPE_INTEGER = TokenType('integer', 0, is_metadata=False) TYPE_OP_COMMA = TokenType('comma', 0, is_metadata=True) TYPE_OP_SQUARE_LEFT_BRACKET = TokenType('square_left_bracket', 0, is_metadata=True) TYPE_OP_SQUARE_RIGHT_BRACKET = TokenType('square_right_bracket', 0, is_metadata=True) TYPE_OP_CURLY_LEFT_BRACKET = TokenType('curly_left_bracket', 0, is_metadata=True) TYPE_OP_CURLY_RIGHT_BRACKET = TokenType('curly_right_bracket', 0, is_metadata=True) TYPE_OP_ASSIGNMENT = TokenType('assignment', 0, is_metadata=True) TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET = TokenType('double_square_left_bracket', 0, is_metadata=True) TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET = TokenType('double_square_right_bracket', 0, is_metadata=True) TYPE_FLOAT = TokenType('float', 1, is_metadata=False) TYPE_DATE = TokenType('date', 40, is_metadata=False) TYPE_OPT_DOT = TokenType('dot', 40, is_metadata=True) TYPE_BARE_STRING = TokenType('bare_string', 50, is_metadata=False) TYPE_STRING = TokenType('string', 90, is_metadata=False) TYPE_MULTILINE_STRING = TokenType('multiline_string', 90, is_metadata=False) TYPE_LITERAL_STRING = TokenType('literal_string', 90, is_metadata=False) TYPE_MULTILINE_LITERAL_STRING = TokenType('multiline_literal_string', 90, is_metadata=False) TYPE_NEWLINE = TokenType('newline', 91, is_metadata=True) TYPE_WHITESPACE = TokenType('whitespace', 93, is_metadata=True) TYPE_COMMENT = TokenType('comment', 95, is_metadata=True) def is_operator(token): """ Returns True if the given token is an operator token. """ return token.type in ( TYPE_OP_COMMA, TYPE_OP_SQUARE_LEFT_BRACKET, TYPE_OP_SQUARE_RIGHT_BRACKET, TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET, TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET, TYPE_OP_CURLY_LEFT_BRACKET, TYPE_OP_CURLY_RIGHT_BRACKET, TYPE_OP_ASSIGNMENT, TYPE_OPT_DOT, ) def is_string(token): return token.type in ( TYPE_STRING, TYPE_MULTILINE_STRING, TYPE_LITERAL_STRING, TYPE_BARE_STRING, TYPE_MULTILINE_LITERAL_STRING ) class Token: """ A token/lexeme in a TOML source file. A Token instance is naturally ordered by its type. """ def __init__(self, _type, source_substring, col=None, row=None): self._source_substring = source_substring self._type = _type self._col = col self._row = row def __eq__(self, other): if not isinstance(other, Token): return False return self.source_substring == other.source_substring and self.type == other.type @property def col(self): """ Column number (1-indexed). """ return self._col @property def row(self): """ Row number (1-indexed). """ return self._row @property def type(self): """ One of of the TOKEN_TYPE_* constants. """ return self._type @property def source_substring(self): """ The substring of the initial source file containing this token. 
""" return self._source_substring def __lt__(self, other): return isinstance(other, Token) and self.type < other.type def __repr__(self): return "{}: {}".format(self.type, self.source_substring) PK!V%poetry/toml/prettify/tokens/errors.pyfrom ..errors import TOMLError class DeserializationError(TOMLError): pass class BadEscapeCharacter(TOMLError): pass class MalformedDateError(DeserializationError): pass PK!NZ[>>&poetry/toml/prettify/tokens/py2toml.py """ A converter of python values to TOML Token instances. """ import datetime import re from poetry.utils._compat import basestring from .. import tokens from ..errors import TOMLError from ..tokens import Token from ..util import chunkate_string class NotPrimitiveError(TOMLError): pass _operator_tokens_by_type = { tokens.TYPE_OP_SQUARE_LEFT_BRACKET: tokens.Token(tokens.TYPE_OP_SQUARE_LEFT_BRACKET, u'['), tokens.TYPE_OP_SQUARE_RIGHT_BRACKET: tokens.Token(tokens.TYPE_OP_SQUARE_RIGHT_BRACKET, u']'), tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET: tokens.Token(tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET, u'[['), tokens.TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET: tokens.Token(tokens.TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET, u']]'), tokens.TYPE_OP_COMMA: tokens.Token(tokens.TYPE_OP_COMMA, u','), tokens.TYPE_NEWLINE: tokens.Token(tokens.TYPE_NEWLINE, u'\n'), tokens.TYPE_OPT_DOT: tokens.Token(tokens.TYPE_OPT_DOT, u'.'), } def operator_token(token_type): return _operator_tokens_by_type[token_type] def create_primitive_token(value, multiline_strings_allowed=True): """ Creates and returns a single token for the given primitive atomic value. Raises NotPrimitiveError when the given value is not a primitive atomic value """ if value is None: return create_primitive_token('') elif isinstance(value, bool): return tokens.Token(tokens.TYPE_BOOLEAN, u'true' if value else u'false') elif isinstance(value, int): return tokens.Token(tokens.TYPE_INTEGER, u'{}'.format(value)) elif isinstance(value, float): return tokens.Token(tokens.TYPE_FLOAT, u'{}'.format(value)) elif isinstance(value, (datetime.datetime, datetime.date, datetime.time)): return tokens.Token(tokens.TYPE_DATE, value.isoformat()) elif isinstance(value, basestring): return create_string_token(value, multiline_strings_allowed=multiline_strings_allowed) raise NotPrimitiveError("{} of type {}".format(value, type(value))) _bare_string_regex = re.compile('^[a-zA-Z0-9_-]*$') def create_string_token(text, bare_string_allowed=False, multiline_strings_allowed=True): """ Creates and returns a single string token. Raises ValueError on non-string input. 
""" if not isinstance(text, basestring): raise ValueError('Given value must be a string') if text == '': return tokens.Token(tokens.TYPE_STRING, '""'.format(_escape_single_line_quoted_string(text))) elif bare_string_allowed and _bare_string_regex.match(text): return tokens.Token(tokens.TYPE_BARE_STRING, text) elif multiline_strings_allowed and (len(tuple(c for c in text if c == '\n')) >= 2 or len(text) > 80): # If containing two or more newlines or is longer than 80 characters we'll use the multiline string format return _create_multiline_string_token(text) else: return tokens.Token(tokens.TYPE_STRING, u'"{}"'.format(_escape_single_line_quoted_string(text))) def _escape_single_line_quoted_string(text): return text.replace('"', '\\"') def _create_multiline_string_token(text): escaped = text.replace(u'"""', u'\"\"\"') if len(escaped) > 50: return tokens.Token(tokens.TYPE_MULTILINE_STRING, u'"""\n{}\\\n"""'.format(_break_long_text(escaped))) else: return tokens.Token(tokens.TYPE_MULTILINE_STRING, u'"""{}"""'.format(escaped)) def _break_long_text(text, maximum_length=75): """ Breaks into lines of 75 character maximum length that are terminated by a backslash. """ def next_line(remaining_text): # Returns a line and the remaining text if '\n' in remaining_text and remaining_text.index('\n') < maximum_length: i = remaining_text.index('\n') return remaining_text[:i+1], remaining_text[i+2:] elif len(remaining_text) > maximum_length and ' ' in remaining_text: i = remaining_text[:maximum_length].rfind(' ') return remaining_text[:i+1] + '\\\n', remaining_text[i+2:] else: return remaining_text, '' remaining_text = text lines = [] while remaining_text: line, remaining_text = next_line(remaining_text) lines += [line] return ''.join(lines) def create_whitespace(source_substring): return Token(tokens.TYPE_WHITESPACE, source_substring) def create_multiline_string(text, maximum_line_length=120): def escape(t): return t.replace(u'"""', u'\"\"\"') source_substring = u'"""\n{}"""'.format(u'\\\n'.join(chunkate_string(escape(text), maximum_line_length))) return Token(tokens.TYPE_MULTILINE_STRING, source_substring) PK!] &poetry/toml/prettify/tokens/toml2py.pyimport codecs import functools import operator import re import string from . import TYPE_BOOLEAN, TYPE_INTEGER, TYPE_FLOAT, TYPE_DATE, \ TYPE_MULTILINE_STRING, TYPE_BARE_STRING, TYPE_MULTILINE_LITERAL_STRING, TYPE_LITERAL_STRING, \ TYPE_STRING from .errors import MalformedDateError from .errors import BadEscapeCharacter def deserialize(token): """ Deserializes the value of a single tokens.Token instance based on its type. Raises DeserializationError when appropriate. """ if token.type == TYPE_BOOLEAN: return _to_boolean(token) elif token.type == TYPE_INTEGER: return _to_int(token) elif token.type == TYPE_FLOAT: return _to_float(token) elif token.type == TYPE_DATE: return _to_date(token) elif token.type in (TYPE_STRING, TYPE_MULTILINE_STRING, TYPE_BARE_STRING, TYPE_LITERAL_STRING, TYPE_MULTILINE_LITERAL_STRING): return _to_string(token) else: raise Exception('This should never happen!') def _unescape_str(text): """ Unescapes a string according the TOML spec. Raises BadEscapeCharacter when appropriate. 
""" # Detect bad escape jobs bad_escape_regexp = re.compile(r'([^\\]|^)\\[^btnfr"\\uU]') if bad_escape_regexp.findall(text): raise BadEscapeCharacter # Do the unescaping return codecs.decode(_unicode_escaped_string(text), 'unicode-escape') def _unicode_escaped_string(text): """ Escapes all unicode characters in the given string """ def is_unicode(c): return c.lower() not in string.ascii_letters + string.whitespace + string.punctuation + string.digits def escape_unicode_char(x): return codecs.encode(x, 'unicode-escape') if any(is_unicode(c) for c in text): homogeneous_chars = tuple(escape_unicode_char(c) if is_unicode(c) else c.encode() for c in text) homogeneous_bytes = functools.reduce(operator.add, homogeneous_chars) return homogeneous_bytes.decode() else: return text def _to_string(token): if token.type == TYPE_BARE_STRING: return token.source_substring elif token.type == TYPE_STRING: escaped = token.source_substring[1:-1] return _unescape_str(escaped) elif token.type == TYPE_MULTILINE_STRING: escaped = token.source_substring[3:-3] # Drop the first newline if existed if escaped and escaped[0] == '\n': escaped = escaped[1:] # Remove all occurrences of a slash-newline-zero-or-more-whitespace patterns escaped = re.sub(r'\\\n\s*', repl='', string=escaped, flags=re.DOTALL) return _unescape_str(escaped) elif token.type == TYPE_LITERAL_STRING: return token.source_substring[1:-1] elif token.type == TYPE_MULTILINE_LITERAL_STRING: text = token.source_substring[3:-3] if text[0] == '\n': text = text[1:] return text raise RuntimeError('Control should never reach here.') def _to_int(token): return int(token.source_substring.replace('_', '')) def _to_float(token): assert token.type == TYPE_FLOAT string = token.source_substring.replace('_', '') return float(string) def _to_boolean(token): return token.source_substring == 'true' _correct_date_format = re.compile( r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(Z|([+-])(\d{2}):(\d{2}))' ) def _to_date(token): if not _correct_date_format.match(token.source_substring): raise MalformedDateError return token.source_substring PK!yۚpoetry/toml/prettify/util.pyimport itertools def is_sequence_like(x): """ Returns True if x exposes a sequence-like interface. """ required_attrs = ( '__len__', '__getitem__' ) return all(hasattr(x, attr) for attr in required_attrs) def is_dict_like(x): """ Returns True if x exposes a dict-like interface. """ required_attrs = ( '__len__', '__getitem__', 'keys', 'values', ) return all(hasattr(x, attr) for attr in required_attrs) def join_with(iterable, separator): """ Joins elements from iterable with separator and returns the produced sequence as a list. separator must be addable to a list. """ inputs = list(iterable) b = [] for i, element in enumerate(inputs): if isinstance(element, (list, tuple, set)): b += tuple(element) else: b += [element] if i < len(inputs)-1: b += separator return b def chunkate_string(text, length): """ Iterates over the given seq in chunks of at maximally the given length. Will never break a whole word. 
""" iterator_index = 0 def next_newline(): try: return next(i for (i, c) in enumerate(text) if i > iterator_index and c == '\n') except StopIteration: return len(text) def next_breaker(): try: return next(i for (i, c) in reversed(tuple(enumerate(text))) if i >= iterator_index and (i < iterator_index+length) and c in (' ', '\t')) except StopIteration: return len(text) while iterator_index < len(text): next_chunk = text[iterator_index:min(next_newline(), next_breaker()+1)] iterator_index += len(next_chunk) yield next_chunk def flatten_nested(nested_dicts): """ Flattens dicts and sequences into one dict with tuples of keys representing the nested keys. Example >>> dd = { \ 'dict1': {'name': 'Jon', 'id': 42}, \ 'dict2': {'name': 'Sam', 'id': 41}, \ 'seq1': [{'one': 1, 'two': 2}] \ } >>> flatten_nested(dd) == { \ ('dict1', 'name'): 'Jon', ('dict1', 'id'): 42, \ ('dict2', 'name'): 'Sam', ('dict2', 'id'): 41, \ ('seq1', 0, 'one'): 1, ('seq1', 0, 'two'): 2, \ } True """ assert isinstance(nested_dicts, (dict, list, tuple)), 'Only works with a collection parameter' def items(c): if isinstance(c, dict): return c.items() elif isinstance(c, (list, tuple)): return enumerate(c) else: raise RuntimeError('c must be a collection') def flatten(dd): output = {} for k, v in items(dd): if isinstance(v, (dict, list, tuple)): for child_key, child_value in flatten(v).items(): output[(k,) + child_key] = child_value else: output[(k,)] = v return output return flatten(nested_dicts) class PeekableIterator: # Returned by peek() when the iterator is exhausted. Truthiness is False. Nothing = tuple() def __init__(self, iter): self._iter = iter def __next__(self): return next(self._iter) def next(self): return self.__next__() def __iter__(self): return self def peek(self): """ Returns PeekableIterator.Nothing when the iterator is exhausted. """ try: v = next(self._iter) self._iter = itertools.chain((v,), self._iter) return v except StopIteration: return PeekableIterator.Nothing PK!)poetry/toml/raw.pyfrom .prettify.elements.abstracttable import AbstractTable def to_raw(x): from .cascadedict import CascadeDict if isinstance(x, AbstractTable): return x.primitive_value elif isinstance(x, CascadeDict): return x.neutralized elif isinstance(x, (list, tuple)): return [to_raw(y) for y in x] elif isinstance(x, dict): return {k: to_raw(v) for (k, v) in x.items()} else: return x PK!NNpoetry/toml/structurer.pyfrom . import toplevels from .cascadedict import CascadeDict class NamedDict(dict): """ A dict that can use Name instances as keys. """ def __init__(self, other_dict=None): dict.__init__(self) if other_dict: for k, v in other_dict.items(): self[k] = v def __setitem__(self, key, value): """ key can be an Name instance. When key is a path in the form of an Name instance, all the parents and grandparents of the value are created along the way as instances of NamedDict. If the parent of the value exists, it is replaced with a CascadeDict() that cascades the old parent value with a new NamedDict that contains the given child name and value. 
""" if isinstance(key, toplevels.Name): obj = self for i, name in enumerate(key.sub_names): if name in obj: if i == len(key.sub_names) - 1: obj[name] = CascadeDict(obj[name], value) else: obj[name] = CascadeDict(NamedDict(), obj[name]) else: if i == len(key.sub_names) - 1: obj[name] = value else: obj[name] = NamedDict() obj = obj[name] else: return dict.__setitem__(self, key, value) def __contains__(self, item): try: _ = self[item] return True except KeyError: return False def append(self, key, value): """ Makes sure the value pointed to by key exists and is a list and appends the given value to it. """ if key in self: self[key].append(value) else: self[key] = [value] def __getitem__(self, item): if isinstance(item, toplevels.Name): d = self for name in item.sub_names: d = d[name] return d else: return dict.__getitem__(self, item) def __eq__(self, other): return dict.__eq__(self, other) def structure(table_toplevels): """ Accepts an ordered sequence of TopLevel instances and returns a navigable object structure representation of the TOML file. """ table_toplevels = tuple(table_toplevels) obj = NamedDict() last_array_of_tables = None # The Name of the last array-of-tables header for toplevel in table_toplevels: if isinstance(toplevel, toplevels.AnonymousTable): obj[''] = toplevel.table_element elif isinstance(toplevel, toplevels.Table): if last_array_of_tables and toplevel.name.is_prefixed_with(last_array_of_tables): seq = obj[last_array_of_tables] unprefixed_name = toplevel.name.without_prefix(last_array_of_tables) seq[-1] = CascadeDict(seq[-1], NamedDict({unprefixed_name: toplevel.table_element})) else: obj[toplevel.name] = toplevel.table_element else: # It's an ArrayOfTables if last_array_of_tables and toplevel.name != last_array_of_tables and \ toplevel.name.is_prefixed_with(last_array_of_tables): seq = obj[last_array_of_tables] unprefixed_name = toplevel.name.without_prefix(last_array_of_tables) if unprefixed_name in seq[-1]: seq[-1][unprefixed_name].append(toplevel.table_element) else: cascaded_with = NamedDict({unprefixed_name: [toplevel.table_element]}) seq[-1] = CascadeDict(seq[-1], cascaded_with) else: obj.append(toplevel.name, toplevel.table_element) last_array_of_tables = toplevel.name return obj PK!{%%poetry/toml/toml_file.pyfrom .prettify.errors import NoArrayFoundError from . import structurer, toplevels, raw from .array import ArrayOfTables from .freshtable import FreshTable from .prettify.elements import factory as element_factory from .prettify import util class TOMLFile(dict): """ A TOMLFile object that tries its best to prserve formatting and order of mappings of the input source. Raises InvalidTOMLFileError on invalid input elements. Raises DuplicateKeysError, DuplicateTableError when appropriate. """ def __init__(self, _elements): self._elements = [] self._navigable = {} self.append_elements(_elements) def __getitem__(self, item): try: value = self._navigable[item] if isinstance(value, (list, tuple)): return ArrayOfTables(toml_file=self, name=item, iterable=value) else: return value except KeyError: return FreshTable(parent=self, name=item, is_array=False) def __contains__(self, item): return item in self.keys() def _setitem_with_key_seq(self, key_seq, value): """ Sets a the value in the TOML file located by the given key sequence. 
Example: self._setitem_with_key_seq(('key1', 'key2', 'key3'), 'text_value') is equivalent to doing self['key1']['key2']['key3'] = 'text_value' """ table = self key_so_far = tuple() for key in key_seq[:-1]: key_so_far += (key,) self._make_sure_table_exists(key_so_far) table = table[key] table[key_seq[-1]] = value def _array_setitem_with_key_seq(self, array_name, index, key_seq, value): """ Sets the array value in the TOML file located by the given key sequence. Example: self._array_setitem_with_key_seq(array_name, index, ('key1', 'key2', 'key3'), 'text_value') is equivalent to doing self.array(array_name)[index]['key1']['key2']['key3'] = 'text_value' """ table = self.array(array_name)[index] key_so_far = tuple() for key in key_seq[:-1]: key_so_far += (key,) new_table = self._array_make_sure_table_exists(array_name, index, key_so_far) if new_table is not None: table = new_table else: table = table[key] table[key_seq[-1]] = value def _make_sure_table_exists(self, name_seq): """ Makes sure the table with the full name composed of name_seq exists. """ t = self for key in name_seq[:-1]: t = t[key] name = name_seq[-1] if name not in t: self.append_elements([element_factory.create_table_header_element(name_seq), element_factory.create_table({})]) def _array_make_sure_table_exists(self, array_name, index, name_seq): """ Makes sure the table with the full name composed of name_seq exists. """ t = self[array_name][index] for key in name_seq[:-1]: t = t[key] name = name_seq[-1] if name not in t: new_table = element_factory.create_table({}) self.append_elements([element_factory.create_table_header_element((array_name,) + name_seq), new_table]) return new_table def __delitem__(self, key): table_element_index = self._elements.index(self._navigable[key]) self._elements[table_element_index] = element_factory.create_table({}) self._on_element_change() def __setitem__(self, key, value): # Setting an array-of-tables if key and isinstance(value, (tuple, list)) and value and all(isinstance(v, dict) for v in value): for table in value: self.array(key).append(table) # Or setting a whole single table elif isinstance(value, dict): if key and key in self: del self[key] for key_seq, child_value in util.flatten_nested({key: value}).items(): self._setitem_with_key_seq(key_seq, child_value) # if key in self._navigable: # del self[key] # index = self._elements.index(self._navigable[key]) # self._elements = self._elements[:index] + [element_factory.create_table(value)] + self._elements[index+1:] # else: # if key: # self._elements.append(element_factory.create_table_header_element(key)) # self._elements.append(element_factory.create_table(value)) # Or updating the anonymous section table else: self[''][key] = value self._on_element_change() def _detect_toplevels(self): """ Returns a sequence of TopLevel instances for the current state of this table. """ return tuple(e for e in toplevels.identify(self.elements) if isinstance(e, toplevels.Table)) def _update_table_fallbacks(self, table_toplevels): """ Updates the fallbacks on all the table elements to make relative table access possible. Raises DuplicateKeysError if appropriate. """ if len(self.elements) <= 1: return def parent_of(toplevel): # Returns a TopLevel parent of the given entry, or None. 
for parent_toplevel in table_toplevels: if toplevel.name.sub_names[:-1] == parent_toplevel.name.sub_names: return parent_toplevel for entry in table_toplevels: if entry.name.is_qualified: parent = parent_of(entry) if parent: child_name = entry.name.without_prefix(parent.name) parent.table_element.set_fallback({child_name.sub_names[0]: entry.table_element}) def _recreate_navigable(self): if self._elements: self._navigable = structurer.structure(toplevels.identify(self._elements)) def array(self, name): """ Returns the array of tables with the given name. """ if name in self._navigable: if isinstance(self._navigable[name], (list, tuple)): return self[name] else: raise NoArrayFoundError else: return ArrayOfTables(toml_file=self, name=name) def _on_element_change(self): self._recreate_navigable() table_toplevels = self._detect_toplevels() self._update_table_fallbacks(table_toplevels) def append_elements(self, elements): """ Appends more elements to the contained internal elements. """ self._elements = self._elements + list(elements) self._on_element_change() def prepend_elements(self, elements): """ Prepends more elements to the contained internal elements. """ self._elements = list(elements) + self._elements self._on_element_change() def dumps(self): """ Returns the TOML file serialized back to str. """ return ''.join(element.serialized() for element in self._elements) def dump(self, file_path): with open(file_path, mode='w') as fp: fp.write(self.dumps()) def keys(self): return set(self._navigable.keys()) | {''} def values(self): return self._navigable.values() def items(self): items = list(self._navigable.items()) def has_anonymous_entry(): return any(key == '' for (key, _) in items) if has_anonymous_entry(): return items else: return items + [('', self[''])] def get(self, item, default=None): return self._navigable.get(item, default) @property def primitive(self): """ Returns a primitive object representation for this container (which is a dict). WARNING: The returned container does not contain any markup or formatting metadata. """ raw_container = raw.to_raw(self._navigable) # Collapse the anonymous table onto the top-level container if present if '' in raw_container: raw_container.update(raw_container['']) del raw_container[''] return raw_container def append_fresh_table(self, fresh_table): """ Gets called by FreshTable instances when they get written to. 
""" if fresh_table.name: elements = [] if fresh_table.is_array: elements += [element_factory.create_array_of_tables_header_element(fresh_table.name)] else: elements += [element_factory.create_table_header_element(fresh_table.name)] elements += [fresh_table, element_factory.create_newline_element()] self.append_elements(elements) else: # It's an anonymous table self.prepend_elements([fresh_table, element_factory.create_newline_element()]) @property def elements(self): return self._elements def __str__(self): is_empty = (not self['']) and (not tuple(k for k in self.keys() if k)) def key_name(key): return '[ANONYMOUS]' if not key else key def pair(key, value): return '%s = %s' % (key_name(key), str(value)) content_text = '' if is_empty else \ '\n\t' + ',\n\t'.join(pair(k, v) for (k, v) in self.items() if v) + '\n' return "TOMLFile{%s}" % content_text def __repr__(self): return str(self) PK!p 1 def __str__(self): return '.'.join(self.sub_names) def __hash__(self): return hash(str(self)) def __eq__(self, other): return str(self) == str(other) def __ne__(self, other): return not self.__eq__(other) class AnonymousTable(TopLevel): def __init__(self, table_element): TopLevel.__init__(self, ('',), table_element) class Table(TopLevel): def __init__(self, names, table_element): TopLevel.__init__(self, names=names, table_element=table_element) class ArrayOfTables(TopLevel): def __init__(self, names, table_element): TopLevel.__init__(self, names=names, table_element=table_element) def _validate_file_elements(file_elements): pass def identify(file_elements): """ Outputs an ordered sequence of instances of TopLevel types. Elements start with an optional TableElement, followed by zero or more pairs of (TableHeaderElement, TableElement). """ if not file_elements: return _validate_file_elements(file_elements) # An iterator over enumerate(the non-metadata) elements iterator = PeekableIterator((element_i, element) for (element_i, element) in enumerate(file_elements) if element.type != elements.TYPE_METADATA) try: _, first_element = iterator.peek() if isinstance(first_element, TableElement): iterator.next() yield AnonymousTable(first_element) except KeyError: pass except StopIteration: return for element_i, element in iterator: if not isinstance(element, TableHeaderElement): continue # If TableHeader of a regular table, return Table following it if not element.is_array_of_tables: table_element_i, table_element = next(iterator) yield Table(names=element.names, table_element=table_element) # If TableHeader of an array of tables, do your thing else: table_element_i, table_element = next(iterator) yield ArrayOfTables(names=element.names, table_element=table_element) PK!poetry/utils/__init__.pyPK!H`Hpoetry/utils/_compat.pyimport sys try: import pathlib2 from pathlib2 import Path except ImportError: from pathlib import Path try: # Python 2 long = long unicode = unicode basestring = basestring except NameError: # Python 3 long = int unicode = str basestring = str PY2 = sys.version_info[0] == 2 PY36 = sys.version_info >= (3, 6) def decode(string, encodings=None): if not PY2 and not isinstance(string, bytes): return string if PY2 and isinstance(string, unicode): return string encodings = encodings or ['utf-8', 'latin1', 'ascii'] for encoding in encodings: try: return string.decode(encoding) except (UnicodeEncodeError, UnicodeDecodeError): pass return string.decode(encodings[0], errors='ignore') def encode(string, encodings=None): if not PY2 and isinstance(string, bytes): return string if PY2 and isinstance(string, str): 
return string encodings = encodings or ['utf-8', 'latin1', 'ascii'] for encoding in encodings: try: return string.encode(encoding) except (UnicodeEncodeError, UnicodeDecodeError): pass return string.encode(encodings[0], errors='ignore') def to_str(string): if isinstance(string, str) or not isinstance(string, (unicode, bytes)): return string if PY2: method = 'encode' else: method = 'decode' encodings = ['utf-8', 'latin1', 'ascii'] for encoding in encodings: try: return getattr(string, method)(encoding) except (UnicodeEncodeError, UnicodeDecodeError): pass return getattr(string, method)(encodings[0], errors='ignore') PK! poetry/utils/appdirs.py""" This code was taken from https://github.com/ActiveState/appdirs and modified to suit our purposes. """ import os import sys WINDOWS = (sys.platform.startswith("win") or (sys.platform == 'cli' and os.name == 'nt')) def expanduser(path): """ Expand ~ and ~user constructions. Includes a workaround for http://bugs.python.org/issue14768 """ expanded = os.path.expanduser(path) if path.startswith('~/') and expanded.startswith('//'): expanded = expanded[1:] return expanded def user_cache_dir(appname): r""" Return full path to the user-specific cache dir for this application. "appname" is the name of application. Typical user cache directories are: macOS: ~/Library/Caches/<AppName> Unix: ~/.cache/<AppName> (XDG default) Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir`). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. """ if WINDOWS: # Get the base path path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) # Add our app name and Cache directory to it path = os.path.join(path, appname, "Cache") elif sys.platform == "darwin": # Get the base path path = expanduser("~/Library/Caches") # Add our app name to it path = os.path.join(path, appname) else: # Get the base path path = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache")) # Add our app name to it path = os.path.join(path, appname) return path def user_data_dir(appname, roaming=False): """ Return full path to the user-specific data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> for a discussion of issues. Typical user data directories are: macOS: ~/Library/Application Support/<AppName> Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined Win XP (not roaming): C:\Documents and Settings\<username>\ ... ...Application Data\<AppName> Win XP (roaming): C:\Documents and Settings\<username>\Local ... ...Settings\Application Data\<AppName> Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppName> Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppName> For Unix, we follow the XDG spec and support $XDG_DATA_HOME. That means, by default "~/.local/share/<AppName>". 
""" if WINDOWS: const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" path = os.path.join(os.path.normpath(_get_win_folder(const)), appname) elif sys.platform == "darwin": path = os.path.join( expanduser('~/Library/Application Support/'), appname, ) else: path = os.path.join( os.getenv('XDG_DATA_HOME', expanduser("~/.local/share")), appname, ) return path def user_config_dir(appname, roaming=True): """Return full path to the user-specific config dir for this application. "appname" is the name of application. If None, just the system directory is returned. "roaming" (boolean, default True) can be set False to not use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. Typical user data directories are: macOS: same as user_data_dir Unix: ~/.config/ Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means, by default "~/.config/". """ if WINDOWS: path = user_data_dir(appname, roaming=roaming) elif sys.platform == "darwin": path = user_data_dir(appname) else: path = os.getenv('XDG_CONFIG_HOME', expanduser("~/.config")) path = os.path.join(path, appname) return path # for the discussion regarding site_config_dirs locations # see def site_config_dirs(appname): """Return a list of potential user-shared config dirs for this application. "appname" is the name of application. Typical user config directories are: macOS: /Library/Application Support// Unix: /etc or $XDG_CONFIG_DIRS[i]// for each value in $XDG_CONFIG_DIRS Win XP: C:\Documents and Settings\All Users\Application ... ...Data\\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: Hidden, but writeable on Win 7: C:\ProgramData\\ """ if WINDOWS: path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) pathlist = [os.path.join(path, appname)] elif sys.platform == 'darwin': pathlist = [os.path.join('/Library/Application Support', appname)] else: # try looking in $XDG_CONFIG_DIRS xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') if xdg_config_dirs: pathlist = [ os.path.join(expanduser(x), appname) for x in xdg_config_dirs.split(os.pathsep) ] else: pathlist = [] # always look in /etc directly as well pathlist.append('/etc') return pathlist # -- Windows support functions -- def _get_win_folder_from_registry(csidl_name): """ This is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names. """ import _winreg shell_folder_name = { "CSIDL_APPDATA": "AppData", "CSIDL_COMMON_APPDATA": "Common AppData", "CSIDL_LOCAL_APPDATA": "Local AppData", }[csidl_name] key = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" ) directory, _type = _winreg.QueryValueEx(key, shell_folder_name) return directory def _get_win_folder_with_ctypes(csidl_name): csidl_const = { "CSIDL_APPDATA": 26, "CSIDL_COMMON_APPDATA": 35, "CSIDL_LOCAL_APPDATA": 28, }[csidl_name] buf = ctypes.create_unicode_buffer(1024) ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) # Downgrade to short path name if have highbit chars. See # . 
has_high_char = False for c in buf: if ord(c) > 255: has_high_char = True break if has_high_char: buf2 = ctypes.create_unicode_buffer(1024) if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): buf = buf2 return buf.value if WINDOWS: try: import ctypes _get_win_folder = _get_win_folder_with_ctypes except ImportError: _get_win_folder = _get_win_folder_from_registry def _win_path_to_bytes(path): """Encode Windows paths to bytes. Only used on Python 2. Motivation is to be consistent with other operating systems where paths are also returned as bytes. This avoids problems mixing bytes and Unicode elsewhere in the codebase. For more details and discussion see <https://github.com/pypa/pip/issues/3463>. If encoding using ASCII and MBCS fails, return the original Unicode path. """ for encoding in ('ASCII', 'MBCS'): try: return path.encode(encoding) except (UnicodeEncodeError, LookupError): pass return path PK! poetry/utils/helpers.pyimport re import shutil import tempfile from contextlib import contextmanager _canonicalize_regex = re.compile('[-_.]+') def canonicalize_name(name): # type: (str) -> str return _canonicalize_regex.sub('-', name).lower() def module_name(name): # type: (str) -> str return canonicalize_name(name).replace('-', '_') @contextmanager def temporary_directory(*args, **kwargs): try: from tempfile import TemporaryDirectory with TemporaryDirectory(*args, **kwargs) as name: yield name except ImportError: name = tempfile.mkdtemp(*args, **kwargs) yield name shutil.rmtree(name) PK! poetry/utils/toml_file.py# -*- coding: utf-8 -*- import toml from poetry.toml import dumps from poetry.toml import loads from poetry.toml import TOMLFile from ._compat import Path class TomlFile: def __init__(self, path): self._path = Path(path) @property def path(self): return self._path def read(self, raw=False): # type: (bool) -> dict with self._path.open() as f: if raw: return toml.loads(f.read()) return loads(f.read()) def write(self, data): # type: (...) -> None if not isinstance(data, TOMLFile): data = toml.dumps(data) else: data = dumps(data) with self._path.open('w') as f: f.write(data) def __getattr__(self, item): return getattr(self._path, item) PK! poetry/utils/venv.pyimport os import platform import subprocess import sys import sysconfig import warnings from contextlib import contextmanager from subprocess import CalledProcessError from poetry.config import Config from poetry.locations import CACHE_DIR from poetry.utils._compat import Path from poetry.utils._compat import decode class VenvError(Exception): pass class VenvCommandError(VenvError): def __init__(self, e): # type: (CalledProcessError) -> None message = 'Command {} errored with the following output: \n{}'.format( e.cmd, e.output.decode() ) super(VenvCommandError, self).__init__(message) class Venv(object): def __init__(self, venv=None): self._venv = venv if self._venv: self._venv = Path(self._venv) self._windows = sys.platform == 'win32' self._bin_dir = None if venv: bin_dir = 'bin' if not self._windows else 'Scripts' self._bin_dir = self._venv / bin_dir self._version_info = None self._python_implementation = None @classmethod def create(cls, io, name=None): # type: (...) 
-> Venv if 'VIRTUAL_ENV' not in os.environ: # Not in a virtualenv # Checking if we need to create one config = Config.create('config.toml') create_venv = config.setting('settings.virtualenvs.create') venv_path = config.setting('settings.virtualenvs.path') if venv_path is None: venv_path = Path(CACHE_DIR) / 'virtualenvs' else: venv_path = Path(venv_path) if not name: name = Path.cwd().name name = '{}-py{}'.format( name, '.'.join([str(v) for v in sys.version_info[:2]]) ) venv = venv_path / name if not venv.exists(): if create_venv is False: io.writeln( '' 'Skipping virtualenv creation, ' 'as specified in config file.' '' ) return cls() io.writeln( 'Creating virtualenv {} in {}'.format( name, str(venv_path) ) ) cls.build(str(venv)) else: if io.is_very_verbose(): io.writeln( 'Virtualenv {} already exists.'.format(name) ) os.environ['VIRTUAL_ENV'] = str(venv) # venv detection: # stdlib venv may symlink sys.executable, so we can't use realpath. # but others can symlink *to* the venv Python, # so we can't just use sys.executable. # So we just check every item in the symlink tree (generally <= 3) p = os.path.normcase(sys.executable) paths = [p] while os.path.islink(p): p = os.path.normcase( os.path.join(os.path.dirname(p), os.readlink(p))) paths.append(p) p_venv = os.path.normcase(os.environ['VIRTUAL_ENV']) if any(p.startswith(p_venv) for p in paths): # Running properly in the virtualenv, don't need to do anything return cls() venv = os.environ['VIRTUAL_ENV'] return cls(venv) @classmethod def build(cls, path): try: from venv import EnvBuilder builder = EnvBuilder(with_pip=True) build = builder.create except ImportError: # We fallback on virtualenv for Python 2.7 from virtualenv import create_environment build = create_environment build(path) @property def venv(self): return self._venv @property def python(self): # type: () -> str """ Path to current python executable """ return self._bin('python') @property def pip(self): # type: () -> str """ Path to current pip executable """ return self._bin('pip') @property def version_info(self): # type: () -> tuple if self._version_info is not None: return self._version_info if not self.is_venv(): self._version_info = sys.version_info else: output = self.run( 'python', '-c', '"import sys; print(\'.\'.join([str(s) for s in sys.version_info[:3]]))"', shell=True ) self._version_info = tuple([ int(s) for s in output.strip().split('.') ]) return self._version_info @property def python_implementation(self): if self._python_implementation is not None: return self._python_implementation if not self.is_venv(): impl = platform.python_implementation() else: impl = self.run( 'python', '-c', '"import platform; print(platform.python_implementation())"', shell=True ).strip() self._python_implementation = impl return self._python_implementation def config_var(self, var): if not self.is_venv(): try: return sysconfig.get_config_var(var) except IOError as e: warnings.warn("{0}".format(e), RuntimeWarning) return None try: value = self.run( 'python', '-c', '"import sysconfig; ' 'print(sysconfig.get_config_var(\'{}\'))"'.format(var), shell=True ).strip() except VenvCommandError as e: warnings.warn("{0}".format(e), RuntimeWarning) return None if value == 'None': value = None elif value == '1': value = 1 elif value == '0': value = 0 return value def run(self, bin, *args, **kwargs): """ Run a command inside the virtual env. 
""" cmd = [bin] + list(args) shell = kwargs.get('shell', False) call = kwargs.pop('call', False) if shell: cmd = ' '.join(cmd) try: if not self.is_venv(): if call: return subprocess.call( cmd, stderr=subprocess.STDOUT, **kwargs ) output = subprocess.check_output( cmd, stderr=subprocess.STDOUT, **kwargs ) else: if self._windows: kwargs['shell'] = True with self.temp_environ(): os.environ['PATH'] = self._path() os.environ['VIRTUAL_ENV'] = str(self._venv) self.unset_env('PYTHONHOME') self.unset_env('__PYVENV_LAUNCHER__') if call: return subprocess.call( cmd, stderr=subprocess.STDOUT, **kwargs ) output = subprocess.check_output( cmd, stderr=subprocess.STDOUT, **kwargs ) except CalledProcessError as e: raise VenvCommandError(e) return decode(output) def execute(self, bin, *args, **kwargs): if not self.is_venv(): return subprocess.call([bin] + list(args)) else: with self.temp_environ(): os.environ['PATH'] = self._path() os.environ['VIRTUAL_ENV'] = str(self._venv) self.unset_env('PYTHONHOME') self.unset_env('__PYVENV_LAUNCHER__') return subprocess.call([bin] + list(args), **kwargs) @contextmanager def temp_environ(self): environ = dict(os.environ) try: yield finally: os.environ.clear() os.environ.update(environ) def _path(self): return os.pathsep.join([ str(self._bin_dir), os.environ['PATH'], ]) def unset_env(self, key): if key in os.environ: del os.environ[key] def get_shell(self): shell = Path(os.environ.get('SHELL', '')).stem if shell in ('bash', 'zsh', 'fish'): return shell def _bin(self, bin): # type: (str) -> str """ Return path to the given executable. """ if not self.is_venv(): return bin return str(self._bin_dir / bin) + ('.exe' if self._windows else '') def is_venv(self): # type: () -> bool return self._venv is not None class NullVenv(Venv): def __init__(self, execute=False): super(NullVenv, self).__init__() self.executed = [] self._execute = execute def run(self, bin, *args): self.executed.append([bin] + list(args)) if self._execute: return super(NullVenv, self).run(bin, *args) def _bin(self, bin): return bin PK!lpoetry/vcs/__init__.pyfrom poetry.utils._compat import Path from .git import Git def get_vcs(directory): # type: (Path) -> Git directory = directory.resolve() for p in [directory] + list(directory.parents): if (p / '.git').is_dir(): return Git(p) PK!O# poetry/vcs/git.py# -*- coding: utf-8 -*- import re import subprocess from poetry.utils._compat import decode class GitConfig: def __init__(self): config_list = decode(subprocess.check_output( ['git', 'config', '-l'], stderr=subprocess.STDOUT )) self._config = {} m = re.findall('(?ms)^([^=]+)=(.*?)$', config_list) if m: for group in m: self._config[group[0]] = group[1] def get(self, key, default=None): return self._config.get(key, default) def __getitem__(self, item): return self._config[item] class Git: def __init__(self, work_dir=None): self._config = GitConfig() self._work_dir = work_dir @property def config(self): # type: () -> GitConfig return self._config def clone(self, repository, dest): # type: (...) -> str return self.run('clone', repository, dest) def checkout(self, rev, folder=None): # type: (...) -> str args = [] if folder is None and self._work_dir: folder = self._work_dir if folder: args += [ '--git-dir', (folder / '.git').as_posix(), '--work-tree', folder.as_posix() ] args += [ 'checkout', rev ] return self.run(*args) def rev_parse(self, rev, folder=None): # type: (...) 
-> str args = [] if folder is None and self._work_dir: folder = self._work_dir if folder: args += [ '--git-dir', (folder / '.git').as_posix(), '--work-tree', folder.as_posix() ] args += [ 'rev-parse', rev ] return self.run(*args) def get_ignored_files(self, folder=None): # type: (...) -> list args = [] if folder is None and self._work_dir: folder = self._work_dir if folder: args += [ '--git-dir', (folder / '.git').as_posix(), '--work-tree', folder.as_posix() ] args += [ 'ls-files', '--others', '-i', '--exclude-standard' ] output = self.run(*args) return output.split('\n') def run(self, *args): # type: (...) -> str return decode(subprocess.check_output( ['git'] + list(args), stderr=subprocess.STDOUT )) PK! poetry/version/__init__.pyimport operator from typing import Union from .exceptions import InvalidVersion from .legacy_version import LegacyVersion from .version import Version OP_EQ = operator.eq OP_LT = operator.lt OP_LE = operator.le OP_GT = operator.gt OP_GE = operator.ge OP_NE = operator.ne _trans_op = { '=': OP_EQ, '==': OP_EQ, '<': OP_LT, '<=': OP_LE, '>': OP_GT, '>=': OP_GE, '!=': OP_NE } def parse(version, # type: str strict=False # type: bool ): # type:(...) -> Union[Version, LegacyVersion] """ Parse the given version string and return either a :class:`Version` object or a LegacyVersion object depending on whether the given version is a valid PEP 440 version or a legacy version. If strict=True only PEP 440 versions will be accepted. """ try: return Version(version) except InvalidVersion: if strict: raise return LegacyVersion(version) def version_compare(version1, version2, operator): # type: (str, str, str) -> bool from poetry.semver.helpers import normalize_version if operator in _trans_op: operator = _trans_op[operator] elif operator in _trans_op.values(): pass else: raise ValueError('Invalid operator') version1 = parse(version1) version2 = parse(version2) try: version1 = parse(normalize_version(str(version1))) except ValueError: pass try: version2 = parse(normalize_version(str(version2))) except ValueError: pass return operator(version1, version2) PK! poetry/version/base.pyclass BaseVersion: def __hash__(self): return hash(self._key) def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def _compare(self, other, method): if not isinstance(other, BaseVersion): return NotImplemented return method(self._key, other._key) PK! poetry/version/exceptions.pyclass InvalidVersion(ValueError): pass PK! poetry/version/helpers.pyfrom poetry.semver.constraints import MultiConstraint from poetry.semver.version_parser import VersionParser PYTHON_VERSION = [ '2.7.*', '3.0.*', '3.1.*', '3.2.*', '3.3.*', '3.4.*', '3.5.*', '3.6.*', '3.7.*', '3.8.*', ] def format_python_constraint(constraint): """ Transforms a disjunctive constraint into a proper, compact constraint string. 
""" if not isinstance(constraint, MultiConstraint): return str(constraint) has_disjunctive = False for c in constraint.constraints: if isinstance(c, MultiConstraint) and c.is_disjunctive(): has_disjunctive = True break parser = VersionParser() formatted = [] accepted = [] if not constraint.is_disjunctive() and not has_disjunctive: return str(constraint) for version in PYTHON_VERSION: version_constraint = parser.parse_constraints(version) matches = constraint.matches(version_constraint) if not matches: formatted.append('!=' + version) else: accepted.append(version) # Checking lower bound low = accepted[0] formatted.insert(0, '>=' + '.'.join(low.split('.')[:2])) return ', '.join(formatted) PK! poetry/version/legacy_version.pyimport re from .base import BaseVersion class LegacyVersion(BaseVersion): def __init__(self, version): self._version = str(version) self._key = _legacy_cmpkey(self._version) def __str__(self): return self._version def __repr__(self): return "".format(repr(str(self))) @property def public(self): return self._version @property def base_version(self): return self._version @property def local(self): return None @property def is_prerelease(self): return False @property def is_postrelease(self): return False _legacy_version_component_re = re.compile( r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, ) _legacy_version_replacement_map = { "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", } def _parse_version_parts(s): for part in _legacy_version_component_re.split(s): part = _legacy_version_replacement_map.get(part, part) if not part or part == ".": continue if part[:1] in "0123456789": # pad for numeric comparison yield part.zfill(8) else: yield "*" + part # ensure that alpha/beta/candidate are before final yield "*final" def _legacy_cmpkey(version): # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch # greater than or equal to 0. This will effectively put the LegacyVersion, # which uses the defacto standard originally implemented by setuptools, # as before all PEP 440 versions. epoch = -1 # This scheme is taken from pkg_resources.parse_version setuptools prior to # it's adoption of the packaging library. parts = [] for part in _parse_version_parts(version.lower()): if part.startswith("*"): # remove "-" before a prerelease tag if part < "*final": while parts and parts[-1] == "*final-": parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == "00000000": parts.pop() parts.append(part) parts = tuple(parts) return epoch, parts PK!<poetry/version/markers.pyimport operator from pyparsing import ( ParseException, ParseResults, stringStart, stringEnd, ) from pyparsing import ZeroOrMore, Group, Forward, QuotedString from pyparsing import Literal as L # noqa class InvalidMarker(ValueError): """ An invalid marker was found, users should refer to PEP 508. """ class UndefinedComparison(ValueError): """ An invalid operation was attempted on a value that doesn't support it. """ class UndefinedEnvironmentName(ValueError): """ A name was attempted to be used that does not exist inside of the environment. 
""" class Node(object): def __init__(self, value): self.value = value def __str__(self): return str(self.value) def __repr__(self): return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) def serialize(self): raise NotImplementedError class Variable(Node): def serialize(self): return str(self) class Value(Node): def serialize(self): return '"{0}"'.format(self) class Op(Node): def serialize(self): return str(self) VARIABLE = ( L("implementation_version") | L("platform_python_implementation") | L("implementation_name") | L("python_full_version") | L("platform_release") | L("platform_version") | L("platform_machine") | L("platform_system") | L("python_version") | L("sys_platform") | L("os_name") | L("os.name") | # PEP-345 L("sys.platform") | # PEP-345 L("platform.version") | # PEP-345 L("platform.machine") | # PEP-345 L("platform.python_implementation") | # PEP-345 L("python_implementation") | # undocumented setuptools legacy L("extra") ) ALIASES = { 'os.name': 'os_name', 'sys.platform': 'sys_platform', 'platform.version': 'platform_version', 'platform.machine': 'platform_machine', 'platform.python_implementation': 'platform_python_implementation', 'python_implementation': 'platform_python_implementation' } VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) VERSION_CMP = ( L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") ) MARKER_OP = VERSION_CMP | L("not in") | L("in") MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) MARKER_VALUE = QuotedString("'") | QuotedString('"') MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) BOOLOP = L("and") | L("or") MARKER_VAR = VARIABLE | MARKER_VALUE MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) LPAREN = L("(").suppress() RPAREN = L(")").suppress() MARKER_EXPR = Forward() MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) MARKER = stringStart + MARKER_EXPR + stringEnd def _coerce_parse_result(results): if isinstance(results, ParseResults): return [_coerce_parse_result(i) for i in results] else: return results def _format_marker(marker, first=True): assert isinstance(marker, (list, tuple, str)) # Sometimes we have a structure like [[...]] which is a single item list # where the single item is itself it's own list. In that case we want skip # the rest of this function so that we don't get extraneous () on the # outside. 
if (isinstance(marker, list) and len(marker) == 1 and isinstance(marker[0], (list, tuple))): return _format_marker(marker[0]) if isinstance(marker, list): inner = (_format_marker(m, first=False) for m in marker) if first: return " ".join(inner) else: return "(" + " ".join(inner) + ")" elif isinstance(marker, tuple): return " ".join([m.serialize() for m in marker]) else: return marker _operators = { "in": lambda lhs, rhs: lhs in rhs, "not in": lambda lhs, rhs: lhs not in rhs, "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def format_full_version(info): version = '{0.major}.{0.minor}.{0.micro}'.format(info) kind = info.releaselevel if kind != 'final': version += kind[0] + str(info.serial) return version class Marker(object): def __init__(self, marker): try: self._markers = _coerce_parse_result(MARKER.parseString(marker)) except ParseException as e: err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( marker, marker[e.loc:e.loc + 8]) raise InvalidMarker(err_str) @property def markers(self): return self._markers def __str__(self): return _format_marker(self._markers) def __repr__(self): return "<Marker({0!r})>".format(str(self)) PK! poetry/version/requirements.py# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import string import re try: import urllib.parse as urlparse except ImportError: from urlparse import urlparse from pyparsing import ( stringStart, stringEnd, originalTextFor, ParseException ) from pyparsing import ZeroOrMore, Word, Optional, Regex, Combine from pyparsing import Literal as L # noqa from poetry.semver.version_parser import VersionParser from .markers import MARKER_EXPR, Marker LEGACY_REGEX = ( r""" (?P<operator>(==|!=|<=|>=|<|>)) \s* (?P<version> [^,;\s)]* # Since this is a "legacy" specifier, and the version # string can be just about anything, we match everything # except for whitespace, a semi-colon for marker support, # a closing paren since versions can be enclosed in # them, and a comma since it's a version separator. ) """ ) REGEX = ( r""" (?P<operator>(~=|==|!=|<=|>=|<|>|===)) (?P<version> (?: # The identity operators allow for an escape hatch that will # do an exact string match of the version you wish to install. # This will not be parsed by PEP 440 and we cannot determine # any semantic meaning from it. This operator is discouraged # but included entirely as an escape hatch. (?<====) # Only match for the identity operator \s* [^\s]* # We just match everything, except for whitespace # since we are only testing for strict identity. ) | (?: # The (non)equality operators allow for wild card and local # versions to be specified so we have to define these two # operators separately to enable that. (?<===|!=) # Only match for equals and not equals \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? # You cannot use a wild card and a dev or local version # together so group them with a | and make them optional. (?: (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local | \.\* # Wild card syntax of .* )? ) | (?: # The compatible operator requires at least two digits in the # release segment. (?<=~=) # Only match for the compatible operator \s* v? (?:[0-9]+!)? 
# epoch [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) | (?: # All other operators only allow a subset of what the # (non)equality operators do. Specifically they do not allow # local versions to be specified nor do they allow the prefix # matching wild cards. (?".format(str(self)) PK! poetry/version/utils.pyclass Infinity(object): def __repr__(self): return "Infinity" def __hash__(self): return hash(repr(self)) def __lt__(self, other): return False def __le__(self, other): return False def __eq__(self, other): return isinstance(other, self.__class__) def __ne__(self, other): return not isinstance(other, self.__class__) def __gt__(self, other): return True def __ge__(self, other): return True def __neg__(self): return NegativeInfinity Infinity = Infinity() class NegativeInfinity(object): def __repr__(self): return "-Infinity" def __hash__(self): return hash(repr(self)) def __lt__(self, other): return True def __le__(self, other): return True def __eq__(self, other): return isinstance(other, self.__class__) def __ne__(self, other): return not isinstance(other, self.__class__) def __gt__(self, other): return False def __ge__(self, other): return False def __neg__(self): return Infinity NegativeInfinity = NegativeInfinity() PK! poetry/version/version.py
import re

from collections import namedtuple
from itertools import dropwhile

from .base import BaseVersion
from .exceptions import InvalidVersion
from .utils import Infinity


_Version = namedtuple(
    "_Version",
    ["epoch", "release", "dev", "pre", "post", "local"],
)

VERSION_PATTERN = re.compile("""
    ^
    v?
    (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P<pre>                                          # pre-release
            [-_.]?
            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
            [-_.]?
            (?P<pre_n>[0-9]+)?
        )?
        (?P<post>                                         # post release
            (?:-(?P<post_n1>[0-9]+))
            |
            (?:
                [-_.]?
                (?P<post_l>post|rev|r)
                [-_.]?
                (?P<post_n2>[0-9]+)?
            )
        )?
        (?P<dev>                                          # dev release
            [-_.]?
            (?P<dev_l>dev)
            [-_.]?
            (?P<dev_n>[0-9]+)?
        )?
    )
    (?:\+(?P<local>[a-z0-9]+(?:[-_.][a-z0-9]+)*))?        # local version
    $
""", re.IGNORECASE | re.VERBOSE)


class Version(BaseVersion):

    def __init__(self, version):
        # Validate the version and parse it into pieces
        match = VERSION_PATTERN.match(version)
        if not match:
            raise InvalidVersion("Invalid version: '{0}'".format(version))

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(
                match.group("pre_l"),
                match.group("pre_n"),
            ),
            post=_parse_letter_version(
                match.group("post_l"),
                match.group("post_n1") or match.group("post_n2"),
            ),
            dev=_parse_letter_version(
                match.group("dev_l"),
                match.group("dev_n"),
            ),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        return "".format(repr(str(self)))

    def __str__(self):
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release
        if self._version.post is not None:
            parts.append(".post{0}".format(self._version.post[1]))

        # Development release
        if self._version.dev is not None:
            parts.append(".dev{0}".format(self._version.dev[1]))

        # Local version segment
        if self._version.local is not None:
            parts.append(
                "+{0}".format(".".join(str(x) for x in self._version.local))
            )

        return "".join(parts)

    @property
    def public(self):
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        return bool(self._version.post)
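
    # Property behaviour in brief (doctest-style sketch):
    #     >>> v = Version('1!1.2.3rc1+abc')
    #     >>> v.public, v.base_version, v.local, v.is_prerelease
    #     ('1!1.2.3rc1', '1!1.2.3', 'abc', True)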


def _parse_letter_version(letter, number):
    if letter:
        # We consider there to be an implicit 0 in a pre-release if there is
        # not a numeral associated with it.
        if number is None:
            number = 0

        # We normalize any letters to their lower case form
        letter = letter.lower()

        # We consider some words to be alternate spellings of other words and
        # in those cases we want to normalize the spellings to our preferred
        # spelling.
        if letter == "alpha":
            letter = "a"
        elif letter == "beta":
            letter = "b"
        elif letter in ["c", "pre", "preview"]:
            letter = "rc"
        elif letter in ["rev", "r"]:
            letter = "post"

        return letter, int(number)
    if not letter and number:
        # We assume if we are given a number, but we are not given a letter
        # then this is using the implicit post release syntax (e.g. 1.0-1)
        letter = "post"

        return letter, int(number)
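
# A few illustrative inputs/outputs for _parse_letter_version:
#     ('alpha', None)  -> ('a', 0)      # implicit 0, normalized spelling
#     ('PREVIEW', '4') -> ('rc', 4)     # case-folded; 'preview' becomes 'rc'
#     (None, '1')      -> ('post', 1)   # implicit post syntax, e.g. 1.0-1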


_local_version_separators = re.compile(r"[._-]")


def _parse_local_version(local):
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _local_version_separators.split(local)
        )
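
# Case-folding makes local segments compare predictably, e.g. (a sketch):
#     _parse_local_version('Ubuntu-11_04') -> ('ubuntu', 11, 4)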


def _cmpkey(epoch, release, pre, post, dev, local):
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    # leading zeros until we come to something non zero, then take the rest
    # re-reverse it back into the correct order and make it a tuple and use
    # that for our sorting key.
    release = tuple(
        reversed(list(
            dropwhile(
                lambda x: x == 0,
                reversed(release),
            )
        ))
    )
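
    # e.g. release (1, 0, 0) collapses to (1,), so 1.0.0, 1.0 and 1 compare
    # equal once the other segments agree.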

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        pre = -Infinity

    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        pre = Infinity

    # Versions without a post segment should sort before those with one.
    if post is None:
        post = -Infinity

    # Versions without a development segment should sort after those with one.
    if dev is None:
        dev = Infinity

    if local is None:
        # Versions without a local segment should sort before those with one.
        local = -Infinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alpha numeric segments sort before numeric segments
        # - Alpha numeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        local = tuple(
            (i, "") if isinstance(i, int) else (-Infinity, i)
            for i in local
        )

    return epoch, release, pre, post, dev, local
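
# Taken together, the substitutions above yield a total order like this
# sketch (each relation follows from one of the rules in _cmpkey):
#
#     1.0.dev0 < 1.0a0 < 1.0rc1 < 1.0 < 1.0+local < 1.0.post1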
PK! poetry/version/version_selector.py
import re
from typing import Union

from poetry.packages import Package
from poetry.semver.comparison import less_than
from poetry.semver.helpers import normalize_version
from poetry.semver.version_parser import VersionParser


class VersionSelector(object):

    def __init__(self, pool, parser=VersionParser()):
        self._pool = pool
        self._parser = parser

    def find_best_candidate(self,
                            package_name,                 # type: str
                            target_package_version=None,  # type: Union[str, None]
                            allow_prereleases=False       # type: bool
                            ):  # type: (...) -> Union[Package, bool]
        """
        Given a package name and an optional version constraint,
        returns the latest matching Package, or False if none matches.
        """
        if target_package_version:
            constraint = self._parser.parse_constraints(target_package_version)
        else:
            constraint = None

        candidates = self._pool.find_packages(package_name, constraint)

        if not candidates:
            return False

        # Select the highest version amongst the candidates, skipping
        # pre-releases unless they are explicitly allowed
        package = None
        for candidate in candidates:
            if candidate.is_prerelease() and not allow_prereleases:
                continue

            # Select highest version of the two
            if package is None or less_than(package.version, candidate.version):
                package = candidate

        if package is None:
            return False

        return package
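
    # Minimal usage sketch (`pool` and the constraint string below are
    # illustrative, not taken from this file):
    #
    #   selector = VersionSelector(pool)
    #   package = selector.find_best_candidate('requests', '>=2.0')
    #   if package:
    #       print(package.version)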

    def find_recommended_require_version(self, package):
        version = package.version

        return self._transform_version(version, package.pretty_version)

    def _transform_version(self, version, pretty_version):
        # attempt to transform 2.1.1 to 2.1
        # this allows you to upgrade through minor versions
        try:
            parts = normalize_version(version).split('.')
        except ValueError:
            return pretty_version

        # check to see if we have a semver-looking version
        if len(parts) == 4 and re.match(r'^0\D?', parts[3]):
            # remove the last parts (the patch version number and any extra)
            if parts[0] == '0':
                del parts[3]
            else:
                del parts[3]
                del parts[2]

            version = '.'.join(parts)
        else:
            return pretty_version

        return '^{}'.format(version)
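
    # For example, assuming normalize_version() pads versions to four
    # dot-separated parts ('2.1.1' -> '2.1.1.0'), the branches above give:
    #   2.1.1 -> ^2.1    (drop the patch part to allow minor upgrades)
    #   0.5.3 -> ^0.5.3  (a 0.x minor bump may break, so keep the patch)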