PK!G܍)poetry/__init__.pyimport os import sys _ROOT = os.path.dirname(os.path.realpath(__file__)) _VENDOR = os.path.join(_ROOT, "_vendor") # Add vendored dependencies to path. sys.path.insert(0, _VENDOR) from .__version__ import __version__ # noqa PK!);[[poetry/__main__.pyimport sys if __name__ == "__main__": from .console import main sys.exit(main()) PK!Spoetry/__version__.py__version__ = "0.10.2" PK!뚩poetry/_vendor/.gitignore* !.gitignore PK!2LCCpoetry/config.pyfrom typing import Any from .locations import CONFIG_DIR from .utils._compat import Path from .utils.toml_file import TomlFile from .utils.toml_file import TOMLFile class Config: def __init__(self, file): # type: (TomlFile) -> None self._file = file if not self._file.exists(): self._raw_content = {} self._content = TOMLFile([]) else: self._raw_content = file.read(raw=True) self._content = file.read() @property def name(self): return str(self._file.path) @property def file(self): return self._file @property def raw_content(self): return self._raw_content @property def content(self): return self._content def setting(self, setting_name, default=None): # type: (str) -> Any """ Retrieve a setting value. """ keys = setting_name.split(".") config = self._raw_content for key in keys: if key not in config: return default config = config[key] return config def add_property(self, key, value): keys = key.split(".") config = self._content for i, key in enumerate(keys): if key not in config and i < len(keys) - 1: config[key] = {} if i == len(keys) - 1: config[key] = value break config = config[key] self.dump() def remove_property(self, key): keys = key.split(".") config = self._content for i, key in enumerate(keys): if key not in config: return if i == len(keys) - 1: del config[key] break config = config[key] self.dump() def dump(self): self._file.write(self._content) @classmethod def create(cls, file, base_dir=None): # type: (...) 
-> Config if base_dir is None: base_dir = CONFIG_DIR file = TomlFile(Path(base_dir) / file) return cls(file) PK!RRpoetry/console/__init__.pyfrom .application import Application def main(): return Application().run() PK! +qpoetry/console/application.pyimport os import re import sys import traceback from cleo import Application as BaseApplication from cleo.formatters import Formatter from cleo.inputs import ArgvInput from cleo.outputs import ConsoleOutput from cleo.outputs import Output from poetry import __version__ from poetry.io.raw_argv_input import RawArgvInput from .commands import AboutCommand from .commands import AddCommand from .commands import BuildCommand from .commands import CheckCommand from .commands import ConfigCommand from .commands import DevelopCommand from .commands import InitCommand from .commands import InstallCommand from .commands import LockCommand from .commands import NewCommand from .commands import PublishCommand from .commands import RemoveCommand from .commands import RunCommand from .commands import ScriptCommand from .commands import SearchCommand from .commands import ShowCommand from .commands import UpdateCommand from .commands import VersionCommand from .commands.cache import CacheClearCommand from .commands.debug import DebugInfoCommand from .commands.debug import DebugResolveCommand from .commands.self import SelfUpdateCommand class Application(BaseApplication): def __init__(self): super(Application, self).__init__("Poetry", __version__) self._poetry = None self._skip_io_configuration = False self._formatter = Formatter(True) self._formatter.add_style("error", "red", options=["bold"]) @property def poetry(self): from poetry.poetry import Poetry if self._poetry is not None: return self._poetry self._poetry = Poetry.create(os.getcwd()) return self._poetry def reset_poetry(self): # type: () -> None self._poetry = None def run(self, i=None, o=None): # type: (...) 
-> int if i is None: i = ArgvInput() if o is None: o = ConsoleOutput() self._formatter.with_colors(o.is_decorated()) o.set_formatter(self._formatter) name = i.get_first_argument() if name in ["run", "script"]: self._skip_io_configuration = True i = RawArgvInput() return super(Application, self).run(i, o) def do_run(self, i, o): name = self.get_command_name(i) if name not in ["run", "script"]: return super(Application, self).do_run(i, o) command = self.find(name) self._running_command = command status_code = command.run(i, o) self._running_command = None return status_code def configure_io(self, i, o): if self._skip_io_configuration: return super(Application, self).configure_io(i, o) def get_default_commands(self): # type: () -> list commands = super(Application, self).get_default_commands() commands += [ AboutCommand(), AddCommand(), BuildCommand(), CheckCommand(), ConfigCommand(), DevelopCommand(), InitCommand(), InstallCommand(), LockCommand(), NewCommand(), PublishCommand(), RemoveCommand(), RunCommand(), ScriptCommand(), SearchCommand(), ShowCommand(), UpdateCommand(), VersionCommand(), ] # Cache commands commands += [CacheClearCommand()] # Debug commands commands += [DebugInfoCommand(), DebugResolveCommand()] # Self commands commands += [SelfUpdateCommand()] return commands def render_exception(self, e, o): tb = traceback.extract_tb(sys.exc_info()[2]) title = "[%s] " % e.__class__.__name__ l = len(title) width = self._terminal.width if not width: width = sys.maxsize formatter = o.get_formatter() lines = [] for line in re.split("\r?\n", str(e)): for splitline in [ line[x : x + (width - 4)] for x in range(0, len(line), width - 4) ]: line_length = ( len(re.sub("\[[^m]*m", "", formatter.format(splitline))) + 4 ) lines.append((splitline, line_length)) l = max(line_length, l) messages = [] empty_line = formatter.format("%s" % (" " * l)) messages.append(empty_line) messages.append( formatter.format("%s%s" % (title, " " * max(0, l - len(title)))) ) for line in lines: 
messages.append( formatter.format( "%s %s" % (line[0], " " * (l - line[1])) ) ) messages.append(empty_line) o.writeln(messages, Output.OUTPUT_RAW) if Output.VERBOSITY_VERBOSE <= o.get_verbosity(): o.writeln("Exception trace:") for exc_info in tb: file_ = exc_info[0] line_number = exc_info[1] function = exc_info[2] line = exc_info[3] o.writeln( " %s in %s() " "at line %s" % (file_, function, line_number) ) o.writeln(" %s" % line) o.writeln("") if self._running_command is not None: o.writeln("%s" % self._running_command.get_synopsis()) o.writeln("") PK!*HH#poetry/console/commands/__init__.pyfrom .about import AboutCommand from .add import AddCommand from .build import BuildCommand from .check import CheckCommand from .config import ConfigCommand from .develop import DevelopCommand from .init import InitCommand from .install import InstallCommand from .lock import LockCommand from .new import NewCommand from .publish import PublishCommand from .remove import RemoveCommand from .run import RunCommand from .script import ScriptCommand from .search import SearchCommand from .show import ShowCommand from .update import UpdateCommand from .version import VersionCommand PK!Rs߯ poetry/console/commands/about.pyfrom .command import Command class AboutCommand(Command): """ Short information about Poetry. about """ def handle(self): self.line( """Poetry - Package Management for Python Poetry is a dependency manager tracking local dependencies of your projects and libraries. See https://github.com/sdispater/poetry for more information. """ ) PK!+Cpoetry/console/commands/add.pyfrom .init import InitCommand from .venv_command import VenvCommand class AddCommand(VenvCommand, InitCommand): """ Add a new dependency to pyproject.toml. add { name* : Packages to add. } { --D|dev : Add package as development dependency. } { --git= : The url of the Git repository. } { --path= : The path to a dependency. } { --E|extras=* : Extras to activate for the dependency. 
} { --optional : Add as an optional dependency. } { --python= : Python version( for which the dependencies must be installed. } { --platform= : Platforms for which the dependencies must be installed. } { --allow-prereleases : Accept prereleases. } { --dry-run : Outputs the operations but will not execute anything (implicitly enables --verbose). } """ help = """The add command adds required packages to your pyproject.toml and installs them. If you do not specify a version constraint, poetry will choose a suitable one based on the available package versions. """ _loggers = ["poetry.repositories.pypi_repository"] def handle(self): from poetry.installation import Installer from poetry.semver import parse_constraint packages = self.argument("name") is_dev = self.option("dev") if (self.option("git") or self.option("path") or self.option("extras")) and len( packages ) > 1: raise ValueError( "You can only specify one package " "when using the --git or --path options" ) if self.option("git") and self.option("path"): raise RuntimeError("--git and --path cannot be used at the same time") section = "dependencies" if is_dev: section = "dev-dependencies" original_content = self.poetry.file.read() content = self.poetry.file.read() poetry_content = content["tool"]["poetry"] for name in packages: for key in poetry_content[section]: if key.lower() == name.lower(): raise ValueError("Package {} is already present".format(name)) if self.option("git") or self.option("path"): requirements = {packages[0]: ""} else: requirements = self._determine_requirements( packages, allow_prereleases=self.option("allow-prereleases") ) requirements = self._format_requirements(requirements) # validate requirements format for constraint in requirements.values(): parse_constraint(constraint) for name, constraint in requirements.items(): constraint = {"version": constraint} if self.option("git"): del constraint["version"] constraint["git"] = self.option("git") elif self.option("path"): del 
constraint["version"] constraint["path"] = self.option("path") if self.option("optional"): constraint["optional"] = True if self.option("allow-prereleases"): constraint["allows-prereleases"] = True if self.option("extras"): extras = [] for extra in self.option("extras"): if " " in extra: extras += [e.strip() for e in extra.split(" ")] else: extras.append(extra) constraint["extras"] = self.option("extras") if self.option("python"): constraint["python"] = self.option("python") if self.option("platform"): constraint["platform"] = self.option("platform") if len(constraint) == 1 and "version" in constraint: constraint = constraint["version"] poetry_content[section][name] = constraint # Write new content self.poetry.file.write(content) # Cosmetic new line self.line("") # Update packages self.reset_poetry() installer = Installer( self.output, self.venv, self.poetry.package, self.poetry.locker, self.poetry.pool, ) installer.dry_run(self.option("dry-run")) installer.update(True) installer.whitelist(requirements) try: status = installer.run() except Exception: self.poetry.file.write(original_content) raise if status != 0 or self.option("dry-run"): # Revert changes if not self.option("dry-run"): self.error( "\n" "Addition failed, reverting pyproject.toml " "to its original content." ) self.poetry.file.write(original_content) return status PK!Nj} poetry/console/commands/build.pyfrom .venv_command import VenvCommand class BuildCommand(VenvCommand): """ Builds a package, as a tarball and a wheel by default. build { --f|format= : Limit the format to either wheel or sdist. 
} """ def handle(self): from poetry.masonry import Builder fmt = "all" if self.option("format"): fmt = self.option("format") package = self.poetry.package self.line( "Building {} ({})".format( package.pretty_name, package.version ) ) builder = Builder(self.poetry, self.venv, self.output) builder.build(fmt) PK!Hu%%)poetry/console/commands/cache/__init__.pyfrom .clear import CacheClearCommand PK!Ց&poetry/console/commands/cache/clear.pyimport os from ..command import Command class CacheClearCommand(Command): """ Clears poetry's cache. cache:clear { cache : The name of the cache to clear. } { --all : Clear all caches. } """ def handle(self): from cachy import CacheManager from poetry.locations import CACHE_DIR cache = self.argument("cache") parts = cache.split(":") cache_dir = os.path.join(CACHE_DIR, "cache", "repositories", parts[0]) cache = CacheManager( { "default": parts[0], "serializer": "json", "stores": {parts[0]: {"driver": "file", "path": cache_dir}}, } ) if len(parts) == 1: if not self.option("all"): raise RuntimeError( "Add the --all option if you want to clear all " "{} caches".format(parts[0]) ) if not os.path.exists(cache_dir): self.line("No cache entries for {}".format(parts[0])) return 0 # Calculate number of entries entries_count = 0 for path, dirs, files in os.walk(cache_dir): entries_count += len(files) delete = self.confirm( "Delete {} entries?".format(entries_count) ) if not delete: return 0 cache.flush() elif len(parts) == 2: raise RuntimeError( "Only specifying the package name is not yet supported. 
" "Add a specific version to clear" ) elif len(parts) == 3: package = parts[1] version = parts[2] if not cache.has("{}:{}".format(package, version)): self.line("No cache entries for {}:{}".format(package, version)) return 0 delete = self.confirm("Delete cache entry {}:{}".format(package, version)) if not delete: return 0 cache.forget("{}:{}".format(package, version)) else: raise ValueError("Invalid cache key") PK!FF poetry/console/commands/check.pyfrom .command import Command class CheckCommand(Command): """ Checks the validity of the pyproject.toml file. check """ def handle(self): # Load poetry and display errors, if any self.poetry.check(self.poetry.local_config, strict=True) self.info("All set!") PK!1"Q Q "poetry/console/commands/command.pyimport logging from cleo import Command as BaseCommand from ..styles.poetry import PoetryStyle class CommandFormatter(logging.Formatter): _colors = {"error": "fg=red", "warning": "fg=yellow", "debug": "fg=blue"} def format(self, record): if not record.exc_info: level = record.levelname.lower() msg = record.msg if level in self._colors: msg = "<{}>{}".format(self._colors[level], msg) return msg return super(CommandFormatter, self).format(record) class CommandHandler(logging.Handler): def __init__(self, command): self._command = command output = self._command.output level = logging.WARNING if output.is_debug(): level = logging.DEBUG elif output.is_very_verbose() or output.is_verbose(): level = logging.INFO super(CommandHandler, self).__init__(level) def emit(self, record): try: msg = self.format(record) level = record.levelname.lower() err = level in ("warning", "error", "exception", "critical") if err: self._command.output.write_error(msg, newline=True) else: self._command.line(msg) except Exception: self.handleError(record) class Command(BaseCommand): _loggers = [] @property def poetry(self): return self.get_application().poetry def reset_poetry(self): # type: () -> None self.get_application().reset_poetry() def run(self, i, 
o): # type: () -> int """ Initialize command. """ self.input = i self.output = PoetryStyle(i, o) for logger in self._loggers: self.register_logger(logging.getLogger(logger)) return super(BaseCommand, self).run(i, o) def register_logger(self, logger): """ Register a new logger. """ handler = CommandHandler(self) handler.setFormatter(CommandFormatter()) logger.handlers = [handler] logger.propagate = False output = self.output level = logging.WARNING if output.is_debug(): level = logging.DEBUG elif output.is_very_verbose() or output.is_verbose(): level = logging.INFO logger.setLevel(level) PK!-3!poetry/console/commands/config.pyimport json import re from .command import Command TEMPLATE = """[settings] [repositories] """ AUTH_TEMPLATE = """[http-basic] """ class ConfigCommand(Command): """ Sets/Gets config options. config { key : Setting key. } { value?* : Setting value. } { --list : List configuration settings } { --unset : Unset configuration setting } """ help = """This command allows you to edit the poetry config settings and repositories.. 
To add a repository: poetry repositories.foo https://bar.com/simple/ To remove a repository (repo is a short alias for repositories): poetry --unset repo.foo """ def __init__(self): from poetry.config import Config super(ConfigCommand, self).__init__() self._config = Config.create("config.toml") self._auth_config = Config.create("auth.toml") def initialize(self, i, o): super(ConfigCommand, self).initialize(i, o) # Create config file if it does not exist if not self._config.file.exists(): self._config.file.parent.mkdir(parents=True, exist_ok=True) with self._config.file.open("w") as f: f.write(TEMPLATE) if not self._auth_config.file.exists(): self._auth_config.file.parent.mkdir(parents=True, exist_ok=True) with self._auth_config.file.open("w") as f: f.write(AUTH_TEMPLATE) def handle(self): if self.option("list"): self._list_configuration(self._config.raw_content) return 0 setting_key = self.argument("key") if not setting_key: return 0 if self.argument("value") and self.option("unset"): raise RuntimeError("You can not combine a setting value with --unset") # show the value if no value is provided if not self.argument("value") and not self.option("unset"): m = re.match("^repos?(?:itories)?(?:\.(.+))?", self.argument("key")) if m: if not m.group(1): value = {} if self._config.setting("repositories") is not None: value = self._config.setting("repositories") else: repo = self._config.setting("repositories.{}".format(m.group(1))) if repo is None: raise ValueError( "There is no {} repository defined".format(m.group(1)) ) value = repo self.line(str(value)) return 0 values = self.argument("value") boolean_validator = lambda val: val in {"true", "false", "1", "0"} boolean_normalizer = lambda val: True if val in ["true", "1"] else False unique_config_values = { "settings.virtualenvs.create": (boolean_validator, boolean_normalizer), "settings.virtualenvs.in-project": (boolean_validator, boolean_normalizer), "settings.pypi.fallback": (boolean_validator, boolean_normalizer), } if 
setting_key in unique_config_values: if self.option("unset"): return self._remove_single_value(setting_key) return self._handle_single_value( setting_key, unique_config_values[setting_key], values ) # handle repositories m = re.match("^repos?(?:itories)?(?:\.(.+))?", self.argument("key")) if m: if not m.group(1): raise ValueError("You cannot remove the [repositories] section") if self.option("unset"): repo = self._config.setting("repositories.{}".format(m.group(1))) if repo is None: raise ValueError( "There is no {} repository defined".format(m.group(1)) ) self._config.remove_property("repositories.{}".format(m.group(1))) return 0 if len(values) == 1: url = values[0] self._config.add_property("repositories.{}.url".format(m.group(1)), url) return 0 raise ValueError( "You must pass the url. " "Example: poetry config repositories.foo https://bar.com" ) # handle auth m = re.match("^(http-basic)\.(.+)", self.argument("key")) if m: if self.option("unset"): if not self._auth_config.setting( "{}.{}".format(m.group(1), m.group(2)) ): raise ValueError( "There is no {} {} defined".format(m.group(2), m.group(1)) ) self._auth_config.remove_property( "{}.{}".format(m.group(1), m.group(2)) ) return 0 if m.group(1) == "http-basic": if len(values) == 1: username = values[0] # Only username, so we prompt for password password = self.secret("Password:") elif len(values) != 2: raise ValueError( "Expected one or two arguments " "(username, password), got {}".format(len(values)) ) else: username = values[0] password = values[1] self._auth_config.add_property( "{}.{}".format(m.group(1), m.group(2)), {"username": username, "password": password}, ) return 0 raise ValueError("Setting {} does not exist".format(self.argument("key"))) def _handle_single_value(self, key, callbacks, values): validator, normalizer = callbacks if len(values) > 1: raise RuntimeError("You can only pass one value.") value = values[0] if not validator(value): raise RuntimeError('"{}" is an invalid value for 
{}'.format(value, key)) self._config.add_property(key, normalizer(value)) return 0 def _remove_single_value(self, key): self._config.remove_property(key) return 0 def _list_configuration(self, contents, k=None): orig_k = k for key, value in contents.items(): if k is None and key not in ["config", "repositories", "settings"]: continue if isinstance(value, dict) or key == "repositories" and k is None: if k is None: k = "" k += re.sub("^config\.", "", key + ".") self._list_configuration(value, k=k) k = orig_k continue if isinstance(value, list): value = [ json.dumps(val) if isinstance(val, list) else val for val in value ] value = "[{}]".format(", ".join(value)) value = json.dumps(value) self.line( "[{}] {}".format((k or "") + key, value) ) PK!LL)poetry/console/commands/debug/__init__.pyfrom .info import DebugInfoCommand from .resolve import DebugResolveCommand PK!NP![%poetry/console/commands/debug/info.pyimport os import sys from ..venv_command import VenvCommand class DebugInfoCommand(VenvCommand): """ Shows debug information. debug:info """ def handle(self): poetry = self.poetry package = poetry.package venv = self.venv poetry_python_version = ".".join(str(s) for s in sys.version_info[:3]) self.output.title("Poetry") self.output.listing( [ "Version: {}".format(poetry.VERSION), "Python: {}".format(poetry_python_version), ] ) self.line("") venv_python_version = ".".join(str(s) for s in venv.version_info[:3]) self.output.title("Virtualenv") self.output.listing( [ "Python: {}".format( venv_python_version ), "Implementation: {}".format( venv.python_implementation ), "Path: {}".format( venv.venv if venv.is_venv() else "NA" ), ] ) self.line("") self.output.title("System") self.output.listing( [ "Platform: {}".format(sys.platform), "OS: {}".format(os.name), ] ) self.line("") PK!~Ӛ,,(poetry/console/commands/debug/resolve.pyimport re from typing import List from ..command import Command class DebugResolveCommand(Command): """ Debugs dependency resolution. 
debug:resolve { package?* : packages to resolve. } { --E|extras=* : Extras to activate for the dependency. } { --python= : Python version(s) to use for resolution. } """ _loggers = ["poetry.repositories.pypi_repository"] def handle(self): from poetry.packages import Dependency from poetry.packages import ProjectPackage from poetry.puzzle import Solver from poetry.repositories.repository import Repository from poetry.semver import parse_constraint packages = self.argument("package") if not packages: package = self.poetry.package else: requirements = self._determine_requirements(packages) requirements = self._format_requirements(requirements) # validate requirements format for constraint in requirements.values(): parse_constraint(constraint) dependencies = [] for name, constraint in requirements.items(): dep = Dependency(name, constraint) extras = [] for extra in self.option("extras"): if " " in extra: extras += [e.strip() for e in extra.split(" ")] else: extras.append(extra) for ex in extras: dep.extras.append(ex) dependencies.append(dep) package = ProjectPackage( self.poetry.package.name, self.poetry.package.version ) package.python_versions = ( self.option("python") or self.poetry.package.python_versions ) for dep in dependencies: package.requires.append(dep) solver = Solver( package, self.poetry.pool, Repository(), Repository(), self.output ) ops = solver.solve() self.line("") self.line("Resolution results:") self.line("") for op in ops: package = op.package self.line( " - {} ({})".format( package.name, package.version ) ) def _determine_requirements(self, requires): # type: (List[str]) -> List[str] if not requires: return [] requires = self._parse_name_version_pairs(requires) result = [] for requirement in requires: if "version" not in requirement: requirement["version"] = "*" result.append("{} {}".format(requirement["name"], requirement["version"])) return result def _parse_name_version_pairs(self, pairs): # type: (list) -> list result = [] for i in 
range(len(pairs)): pair = re.sub("^([^=: ]+)[=: ](.*)$", "\\1 \\2", pairs[i].strip()) pair = pair.strip() if " " in pair: name, version = pair.split(" ", 2) result.append({"name": name, "version": version}) else: result.append({"name": pair}) return result def _format_requirements(self, requirements): # type: (List[str]) -> dict requires = {} requirements = self._parse_name_version_pairs(requirements) for requirement in requirements: requires[requirement["name"]] = requirement["version"] return requires PK!X466"poetry/console/commands/develop.pyimport os from .venv_command import VenvCommand class DevelopCommand(VenvCommand): """ Installs the current project in development mode. develop """ help = """\ The develop command installs the current project in development mode. """ def handle(self): from poetry.masonry.builders import SdistBuilder from poetry.io import NullIO from poetry.utils._compat import decode from poetry.utils.venv import NullVenv setup = self.poetry.file.parent / "setup.py" has_setup = setup.exists() if has_setup: self.line("A setup.py file already exists. Using it.") else: builder = SdistBuilder(self.poetry, NullVenv(), NullIO()) with setup.open("w") as f: f.write(decode(builder.build_setup())) try: self._install(setup) finally: if not has_setup: os.remove(str(setup)) def _install(self, setup): self.call("install") self.line( "Installing {} ({})".format( self.poetry.package.pretty_name, self.poetry.package.pretty_version ) ) self.venv.run("pip", "install", "-e", str(setup.parent), "--no-deps") PK!>(>(poetry/console/commands/init.py# -*- coding: utf-8 -*- from __future__ import unicode_literals import re from typing import List from typing import Tuple from .command import Command from .venv_command import VenvCommand class InitCommand(Command): """ Creates a basic pyproject.toml file in the current directory. 
init {--name= : Name of the package} {--description= : Description of the package} {--author= : Author name of the package} {--dependency=* : Package to require with an optional version constraint, e.g. requests:^2.10.0 or requests=2.11.1} {--dev-dependency=* : Package to require for development with an optional version constraint, e.g. requests:^2.10.0 or requests=2.11.1} {--l|license= : License of the package} """ help = """\ The init command creates a basic pyproject.toml file in the current directory. """ def __init__(self): super(InitCommand, self).__init__() self._pool = None def handle(self): from poetry.layouts import layout from poetry.utils._compat import Path from poetry.vcs.git import GitConfig if (Path.cwd() / "pyproject.toml").exists(): self.error("A pyproject.toml file already exists.") return 1 vcs_config = GitConfig() self.line( [ "", "This command will guide you through creating your poetry.toml config.", "", ] ) name = self.option("name") if not name: name = Path.cwd().name.lower() question = self.create_question( "Package name [{}]: ".format(name), default=name ) name = self.ask(question) version = "0.1.0" question = self.create_question( "Version [{}]: ".format(version), default=version ) version = self.ask(question) description = self.option("description") or "" question = self.create_question( "Description [{}]: ".format(description), default=description, ) description = self.ask(question) author = self.option("author") if not author and vcs_config and vcs_config.get("user.name"): author = vcs_config["user.name"] author_email = vcs_config.get("user.email") if author_email: author += " <{}>".format(author_email) question = self.create_question( "Author [{}, n to skip]: ".format(author), default=author ) question.validator = lambda v: self._validate_author(v, author) author = self.ask(question) if not author: authors = [] else: authors = [author] license = self.option("license") or "" question = self.create_question( "License [{}]: 
".format(license), default=license ) license = self.ask(question) question = self.create_question("Compatible Python versions [*]: ", default="*") python = self.ask(question) self.line("") requirements = [] question = "Would you like to define your dependencies" " (require) interactively?" if self.confirm(question, True): requirements = self._format_requirements( self._determine_requirements(self.option("dependency")) ) dev_requirements = [] question = "Would you like to define your dev dependencies" " (require-dev) interactively" if self.confirm(question, True): dev_requirements = self._format_requirements( self._determine_requirements(self.option("dev-dependency")) ) layout_ = layout("standard")( name, version, description=description, author=authors[0] if authors else None, license=license, python=python, dependencies=requirements, dev_dependencies=dev_requirements, ) content = layout_.generate_poetry_content() if self.input.is_interactive(): self.line("Generated file") self.line(["", content, ""]) if not self.confirm("Do you confirm generation?", True): self.line("Command aborted") return 1 with (Path.cwd() / "pyproject.toml").open("w") as f: f.write(content) def _determine_requirements( self, requires, allow_prereleases=False # type: List[str] # type: bool ): # type: (...) 
-> List[str] if not requires: requires = [] package = self.ask("Search for package:") while package is not None: matches = self._get_pool().search(package) if not matches: self.line("Unable to find package") package = False else: choices = [] for found_package in matches: choices.append(found_package.pretty_name) self.line( "Found {} packages matching {}".format( len(matches), package ) ) package = self.choice( "\nEnter package # to add, or the complete package name if it is not listed", choices, attempts=3, ) # no constraint yet, determine the best version automatically if package is not False and " " not in package: question = self.create_question( "Enter the version constraint to require " "(or leave blank to use the latest version):" ) question.attempts = 3 question.validator = lambda x: (x or "").strip() or False constraint = self.ask(question) if constraint is False: _, constraint = self._find_best_version_for_package(package) self.line( "Using version {} for {}".format( constraint, package ) ) package += " {}".format(constraint) if package is not False: requires.append(package) package = self.ask("\nSearch for a package:") return requires requires = self._parse_name_version_pairs(requires) result = [] for requirement in requires: if "version" not in requirement: # determine the best version automatically name, version = self._find_best_version_for_package( requirement["name"], allow_prereleases=allow_prereleases ) requirement["version"] = version requirement["name"] = name self.line( "Using version {} for {}".format(version, name) ) else: # check that the specified version/constraint exists # before we proceed name, _ = self._find_best_version_for_package( requirement["name"], requirement["version"], allow_prereleases=allow_prereleases, ) requirement["name"] = name result.append("{} {}".format(requirement["name"], requirement["version"])) return result def _find_best_version_for_package( self, name, required_version=None, allow_prereleases=False ): # type: 
(...) -> Tuple[str, str] from poetry.version.version_selector import VersionSelector selector = VersionSelector(self._get_pool()) package = selector.find_best_candidate( name, required_version, allow_prereleases=allow_prereleases ) if not package: # TODO: find similar raise ValueError( "Could not find a matching version of package {}".format(name) ) return (package.pretty_name, selector.find_recommended_require_version(package)) def _parse_name_version_pairs(self, pairs): # type: (list) -> list result = [] for i in range(len(pairs)): pair = re.sub("^([^=: ]+)[=: ](.*)$", "\\1 \\2", pairs[i].strip()) pair = pair.strip() if " " in pair: name, version = pair.split(" ", 2) result.append({"name": name, "version": version}) else: result.append({"name": pair}) return result def _format_requirements(self, requirements): # type: (List[str]) -> dict requires = {} requirements = self._parse_name_version_pairs(requirements) for requirement in requirements: requires[requirement["name"]] = requirement["version"] return requires def _validate_author(self, author, default): from poetry.packages.package import AUTHOR_REGEX author = author or default if author in ["n", "no"]: return m = AUTHOR_REGEX.match(author) if not m: raise ValueError( "Invalid author string. Must be in the format: " "John Smith " ) return author def _get_pool(self): from poetry.repositories import Pool from poetry.repositories.pypi_repository import PyPiRepository if isinstance(self, VenvCommand): return self.poetry.pool if self._pool is None: self._pool = Pool() self._pool.add_repository(PyPiRepository()) return self._pool PK!'GEE"poetry/console/commands/install.pyfrom .venv_command import VenvCommand class InstallCommand(VenvCommand): """ Installs the project dependencies. install { --no-dev : Do not install dev dependencies. } { --dry-run : Outputs the operations but will not execute anything (implicitly enables --verbose). } { --E|extras=* : Extra sets of dependencies to install. 
} { --develop=* : Install given packages in development mode. } """ help = """The install command reads the pyproject.toml file from the current directory, processes it, and downloads and installs all the libraries and dependencies outlined in that file. If the file does not exist it will look for pyproject.toml and do the same. poetry install """ _loggers = ["poetry.repositories.pypi_repository"] def handle(self): from poetry.installation import Installer installer = Installer( self.output, self.venv, self.poetry.package, self.poetry.locker, self.poetry.pool, ) extras = [] for extra in self.option("extras"): if " " in extra: extras += [e.strip() for e in extra.split(" ")] else: extras.append(extra) installer.extras(extras) installer.dev_mode(not self.option("no-dev")) installer.develop(self.option("develop")) installer.dry_run(self.option("dry-run")) installer.verbose(self.option("verbose")) return installer.run() PK!Rpoetry/console/commands/lock.pyfrom .venv_command import VenvCommand class LockCommand(VenvCommand): """ Locks the project dependencies. lock """ help = """The lock command reads the pyproject.toml file from the current directory, processes it, and locks the depdencies in the pyproject.lock file. poetry lock """ _loggers = ["poetry.repositories.pypi_repository"] def handle(self): from poetry.installation import Installer installer = Installer( self.output, self.venv, self.poetry.package, self.poetry.locker, self.poetry.pool, ) installer.update(True) installer.execute_operations(False) return installer.run() PK!I poetry/console/commands/new.pyfrom .command import Command class NewCommand(Command): """ Creates a new Python project at new { path : The path to create the project at. } { --name : Set the resulting package name. } { --src : Use the src layout for the project. 
} """ def handle(self): from poetry.layouts import layout from poetry.utils._compat import Path from poetry.vcs.git import GitConfig if self.option("src"): layout_ = layout("src") else: layout_ = layout("standard") path = Path.cwd() / Path(self.argument("path")) name = self.option("name") if not name: name = path.name if path.exists(): if list(path.glob("*")): # Directory is not empty. Aborting. raise RuntimeError( "Destination {} " "exists and is not empty".format(path) ) readme_format = "rst" config = GitConfig() author = None if config.get("user.name"): author = config["user.name"] author_email = config.get("user.email") if author_email: author += " <{}>".format(author_email) layout_ = layout_(name, "0.1.0", author=author, readme_format=readme_format) layout_.create(path) self.line( "Created package {} in {}".format( name, path.relative_to(Path.cwd()) ) ) PK!2"poetry/console/commands/publish.pyfrom .command import Command class PublishCommand(Command): """ Publishes a package to a remote repository. publish { --r|repository= : The repository to publish the package to. } { --u|username= : The username to access the repository. } { --p|password= : The password to access the repository. } { --build : Build the package before publishing. } """ help = """The publish command builds and uploads the package to a remote repository. By default, it will upload to PyPI but if you pass the --repository option it will upload to it instead. The --repository option should match the name of a configured repository using the config command. """ def handle(self): from poetry.masonry.publishing.publisher import Publisher publisher = Publisher(self.poetry, self.output) # Building package first, if told if self.option("build"): if publisher.files: if not self.confirm( "There are {} files ready for publishing. 
" "Build anyway?".format(len(publisher.files)) ): self.line_error("Aborted!") return 1 self.call("build") files = publisher.files if not files: self.line_error( "No files to publish. " "Run poetry build first or use the --build option." ) return 1 self.line("") publisher.publish( self.option("repository"), self.option("username"), self.option("password") ) PK!J !poetry/console/commands/remove.pyfrom .venv_command import VenvCommand class RemoveCommand(VenvCommand): """ Removes a package from the project dependencies. remove { packages* : Packages that should be removed. } {--D|dev : Removes a package from the development dependencies. } {--dry-run : Outputs the operations but will not execute anything (implicitly enables --verbose). } """ help = """The remove command removes a package from the current list of installed packages poetry remove""" _loggers = ["poetry.repositories.pypi_repository"] def handle(self): from poetry.installation import Installer packages = self.argument("packages") is_dev = self.option("dev") original_content = self.poetry.file.read() content = self.poetry.file.read() poetry_content = content["tool"]["poetry"] section = "dependencies" if is_dev: section = "dev-dependencies" # Deleting entries requirements = {} for name in packages: found = False for key in poetry_content[section]: if key.lower() == name.lower(): found = True requirements[name] = poetry_content[section][name] break if not found: raise ValueError("Package {} not found".format(name)) for key in requirements: del poetry_content[section][key] # Write the new content back self.poetry.file.write(content) # Update packages self.reset_poetry() installer = Installer( self.output, self.venv, self.poetry.package, self.poetry.locker, self.poetry.pool, ) installer.dry_run(self.option("dry-run")) installer.update(True) installer.whitelist(requirements) try: status = installer.run() except Exception: self.poetry.file.write(original_content) raise if status != 0 or self.option("dry-run"): # 
Revert changes if not self.option("dry-run"): self.error( "\n" "Removal failed, reverting pyproject.toml " "to its original content." ) self.poetry.file.write(original_content) return status PK!35poetry/console/commands/run.pyfrom .venv_command import VenvCommand class RunCommand(VenvCommand): """ Runs a command in the appropriate environment. run { args* : The command and arguments/options to run. } """ def handle(self): args = self.argument("args") script = args[0] scripts = self.poetry.local_config.get("scripts") if scripts and script in scripts: return self.run_script(scripts[script], args) venv = self.venv return venv.execute(*args) def run_script(self, script, args): module, callable_ = script.split(":") src_in_sys_path = "sys.path.append('src'); " if self._module.is_in_src() else "" cmd = ["python", "-c"] cmd += [ '"import sys; ' "from importlib import import_module; " "sys.argv = {!r}; {}" "import_module('{}').{}()\"".format( args, src_in_sys_path, module, callable_ ) ] return self.venv.run(*cmd, shell=True, call=True) @property def _module(self): from ...masonry.utils.module import Module poetry = self.poetry package = poetry.package path = poetry.file.parent module = Module(package.name, path.as_posix()) return module def merge_application_definition(self, merge_args=True): if self._application is None or ( self._application_definition_merged and (self._application_definition_merged_with_args or not merge_args) ): return if merge_args: current_arguments = self._definition.get_arguments() self._definition.set_arguments( self._application.get_definition().get_arguments() ) self._definition.add_arguments(current_arguments) self._application_definition_merged = True if merge_args: self._application_definition_merged_with_args = True PK!!F!poetry/console/commands/script.pyfrom .venv_command import VenvCommand class ScriptCommand(VenvCommand): """ Executes a script defined in pyproject.toml. 
(Deprecated) script { script-name : The name of the script to execute } { args?* : The command and arguments/options to pass to the script. } """ def handle(self): self.line("script is deprecated use run instead.") self.line("") script = self.argument("script-name") argv = [script] + self.argument("args") scripts = self.poetry.local_config.get("scripts") if not scripts: raise RuntimeError("No scripts defined in pyproject.toml") if script not in scripts: raise ValueError("Script {} is not defined".format(script)) module, callable_ = scripts[script].split(":") src_in_sys_path = "sys.path.append('src'); " if self._module.is_in_src() else "" cmd = ["python", "-c"] cmd += [ '"import sys; ' "from importlib import import_module; " "sys.argv = {!r}; {}" "import_module('{}').{}()\"".format( argv, src_in_sys_path, module, callable_ ) ] self.venv.run(*cmd, shell=True, call=True) @property def _module(self): from ...masonry.utils.module import Module poetry = self.poetry package = poetry.package path = poetry.file.parent module = Module(package.name, path.as_posix()) return module def merge_application_definition(self, merge_args=True): if self._application is None or ( self._application_definition_merged and (self._application_definition_merged_with_args or not merge_args) ): return if merge_args: current_arguments = self._definition.get_arguments() self._definition.set_arguments( self._application.get_definition().get_arguments() ) self._definition.add_arguments(current_arguments) self._application_definition_merged = True if merge_args: self._application_definition_merged_with_args = True PK!@zII!poetry/console/commands/search.pyfrom .command import Command class SearchCommand(Command): """ Searches for packages on remote repositories. search { tokens* : The tokens to search for. } { --N|only-name : Search only in name. 
} """ def handle(self): from poetry.repositories.pypi_repository import PyPiRepository flags = PyPiRepository.SEARCH_FULLTEXT if self.option("only-name"): flags = PyPiRepository.SEARCH_FULLTEXT results = PyPiRepository().search(self.argument("tokens"), flags) for result in results: self.line("") name = "{}".format(result.name) name += " ({})".format(result.version) self.line(name) if result.description: self.line(" {}".format(result.description)) PK!Z&&(poetry/console/commands/self/__init__.pyfrom .update import SelfUpdateCommand PK!Ez&poetry/console/commands/self/update.pyimport os import shutil import subprocess import sys from email.parser import Parser from functools import cmp_to_key from ..command import Command class SelfUpdateCommand(Command): """ Updates poetry to the latest version. self:update { version? : The version to update to. } { --preview : Install prereleases. } """ def handle(self): from poetry.__version__ import __version__ from poetry.repositories.pypi_repository import PyPiRepository version = self.argument("version") if not version: version = ">=" + __version__ repo = PyPiRepository(fallback=False) packages = repo.find_packages( "poetry", version, allow_prereleases=self.option("preview") ) if not packages: self.line("No release found for the specified version") return packages.sort( key=cmp_to_key( lambda x, y: 0 if x.version == y.version else int(x.version < y.version or -1) ) ) release = None for package in reversed(packages): if package.is_prerelease(): if self.option("preview"): release = package break continue release = package break if release is None: self.line("No new release found") return if release.version == __version__: self.line("You are using the latest version") return try: self.update(release) except subprocess.CalledProcessError as e: self.line("") self.output.block( [ "[CalledProcessError]", "An error has occured: {}".format(str(e)), e.output, ], style="error", ) return e.returncode def update(self, release): from 
poetry.utils._compat import Path from poetry.utils.helpers import temporary_directory version = release.version self.line("Updating to {}".format(version)) prefix = sys.prefix base_prefix = getattr(sys, "base_prefix", None) real_prefix = getattr(sys, "real_prefix", None) prefix_poetry = Path(prefix) / "bin" / "poetry" if prefix_poetry.exists(): pip = (prefix_poetry.parent / "pip").resolve() elif ( base_prefix and base_prefix != prefix and (Path(base_prefix) / "bin" / "poetry").exists() ): pip = Path(base_prefix) / "bin" / "pip" elif real_prefix: pip = Path(real_prefix) / "bin" / "pip" else: pip = Path(prefix) / "bin" / "pip" if not pip.exists(): raise RuntimeError("Unable to determine poetry's path") with temporary_directory(prefix="poetry-update-") as temp_dir: temp_dir = Path(temp_dir) dist = temp_dir / "dist" self.line(" - Getting dependencies") self.process( str(pip), "install", "-U", "poetry=={}".format(release.version), "--target", str(dist), ) self.line(" - Vendorizing dependencies") poetry_dir = dist / "poetry" vendor_dir = poetry_dir / "_vendor" # Everything, except poetry itself, should # be put in the _vendor directory for file in dist.glob("*"): if file.name.startswith("poetry"): continue dest = vendor_dir / file.name if file.is_dir(): shutil.copytree(str(file), str(dest)) shutil.rmtree(str(file)) else: shutil.copy(str(file), str(dest)) os.unlink(str(file)) wheel_data = dist / "poetry-{}.dist-info".format(version) / "WHEEL" with wheel_data.open() as f: wheel_data = Parser().parsestr(f.read()) tag = wheel_data["Tag"] # Repack everything and install self.line(" - Updating poetry") shutil.make_archive( str(temp_dir / "poetry-{}-{}".format(version, tag)), format="zip", root_dir=str(dist), ) os.rename( str(temp_dir / "poetry-{}-{}.zip".format(version, tag)), str(temp_dir / "poetry-{}-{}.whl".format(version, tag)), ) self.process( str(pip), "install", "--upgrade", "--no-deps", str(temp_dir / "poetry-{}-{}.whl".format(version, tag)), ) self.line("") self.line( 
"poetry ({}) " "successfully installed!".format(version) ) def process(self, *args): return subprocess.check_output(list(args), stderr=subprocess.STDOUT) PK!b(,,poetry/console/commands/show.py# -*- coding: utf-8 -*- import sys from .venv_command import VenvCommand class ShowCommand(VenvCommand): """ Shows information about packages. show { package? : Package to inspect. } { --t|tree : List the dependencies as a tree. } { --l|latest : Show the latest version. } { --o|outdated : Show the latest version but only for packages that are outdated. } { --a|all : Show all packages (even those not compatible with current system). } """ help = """The show command displays detailed information about a package, or lists all packages available.""" colors = ["green", "yellow", "cyan", "magenta", "blue"] def handle(self): from poetry.packages.constraints.generic_constraint import GenericConstraint from poetry.repositories.installed_repository import InstalledRepository from poetry.semver import Version from poetry.semver import parse_constraint package = self.argument("package") if self.option("tree"): self.init_styles() if self.option("outdated"): self.input.set_option("latest", True) locked_repo = self.poetry.locker.locked_repository(True) # Show tree view if requested if self.option("tree") and not package: requires = self.poetry.package.requires + self.poetry.package.dev_requires packages = locked_repo.packages for package in packages: for require in requires: if package.name == require.name: self.display_package_tree(package, locked_repo) break return 0 table = self.table(style="compact") table.get_style().set_vertical_border_char("") locked_packages = locked_repo.packages if package: pkg = None for locked in locked_packages: if package.lower() == locked.name: pkg = locked break if not pkg: raise ValueError("Package {} not found".format(package)) if self.option("tree"): self.display_package_tree(pkg, locked_repo) return 0 rows = [ ["name", " : {}".format(pkg.pretty_name)], 
["version", " : {}".format(pkg.pretty_version)], ["description", " : {}".format(pkg.description)], ] table.add_rows(rows) table.render() if pkg.requires: self.line("") self.line("dependencies") for dependency in pkg.requires: self.line( " - {} {}".format( dependency.pretty_name, dependency.pretty_constraint ) ) return 0 show_latest = self.option("latest") show_all = self.option("all") terminal = self.get_application().terminal width = terminal.width name_length = version_length = latest_length = 0 latest_packages = {} installed_repo = InstalledRepository.load(self.venv) skipped = [] platform = sys.platform python = Version.parse(".".join([str(i) for i in self._venv.version_info[:3]])) # Computing widths for locked in locked_packages: python_constraint = parse_constraint(locked.requirements.get("python", "*")) platform_constraint = GenericConstraint.parse( locked.requirements.get("platform", "*") ) if not python_constraint.allows(python) or not platform_constraint.matches( GenericConstraint("=", platform) ): skipped.append(locked) if not show_all: continue current_length = len(locked.pretty_name) if not self.output.is_decorated(): installed_status = self.get_installed_status(locked, installed_repo) if installed_status == "not-installed": current_length += 4 name_length = max(name_length, current_length) version_length = max(version_length, len(locked.full_pretty_version)) if show_latest: latest = self.find_latest_package(locked) if not latest: latest = locked latest_packages[locked.pretty_name] = latest latest_length = max(latest_length, len(latest.full_pretty_version)) write_version = name_length + version_length + 3 <= width write_latest = name_length + version_length + latest_length + 3 <= width write_description = name_length + version_length + latest_length + 24 <= width for locked in locked_packages: color = "green" name = locked.pretty_name install_marker = "" if locked in skipped: if not show_all: continue color = "black;options=bold" else: installed_status 
= self.get_installed_status(locked, installed_repo) if installed_status == "not-installed": color = "red" if not self.output.is_decorated(): # Non installed in non decorated mode install_marker = " (!)" line = "{:{}}{}".format( color, name, name_length - len(install_marker), install_marker ) if write_version: line += " {:{}}".format(locked.full_pretty_version, version_length) if show_latest and write_latest: latest = latest_packages[locked.pretty_name] update_status = self.get_update_status(latest, locked) color = "green" if update_status == "semver-safe-update": color = "red" elif update_status == "update-possible": color = "yellow" line += " {:{}}".format( color, latest.pretty_version, latest_length ) if self.option("outdated") and update_status == "up-to-date": continue if write_description: description = locked.description remaining = width - name_length - version_length - 4 if show_latest: remaining -= latest_length if len(locked.description) > remaining: description = description[: remaining - 3] + "..." 
line += " " + description self.line(line) def display_package_tree(self, package, installed_repo): self.write("{}".format(package.pretty_name)) self.line(" {} {}".format(package.pretty_version, package.description)) dependencies = package.requires dependencies = sorted(dependencies, key=lambda x: x.name) tree_bar = "├" j = 0 total = len(dependencies) for dependency in dependencies: j += 1 if j == total: tree_bar = "└" level = 1 color = self.colors[level] info = "{tree_bar}── <{color}>{name} {constraint}".format( tree_bar=tree_bar, color=color, name=dependency.name, constraint=dependency.pretty_constraint, ) self._write_tree_line(info) tree_bar = tree_bar.replace("└", " ") packages_in_tree = [package.name, dependency.name] self._display_tree( dependency, installed_repo, packages_in_tree, tree_bar, level + 1 ) def _display_tree( self, dependency, installed_repo, packages_in_tree, previous_tree_bar="├", level=1, ): previous_tree_bar = previous_tree_bar.replace("├", "│") dependencies = [] for package in installed_repo.packages: if package.name == dependency.name: dependencies = package.requires break dependencies = sorted(dependencies, key=lambda x: x.name) tree_bar = previous_tree_bar + " ├" i = 0 total = len(dependencies) for dependency in dependencies: i += 1 current_tree = packages_in_tree if i == total: tree_bar = previous_tree_bar + " └" color_ident = level % len(self.colors) color = self.colors[color_ident] circular_warn = "" if dependency.name in current_tree: circular_warn = "(circular dependency aborted here)" info = "{tree_bar}── <{color}>{name} {constraint} {warn}".format( tree_bar=tree_bar, color=color, name=dependency.name, constraint=dependency.pretty_constraint, warn=circular_warn, ) self._write_tree_line(info) tree_bar = tree_bar.replace("└", " ") if dependency.name not in current_tree: current_tree.append(dependency.name) self._display_tree( dependency, installed_repo, current_tree, tree_bar, level + 1 ) def _write_tree_line(self, line): if not 
self.output.is_decorated(): line = line.replace("└", "`-") line = line.replace("├", "|-") line = line.replace("──", "-") line = line.replace("│", "|") self.line(line) def init_styles(self): for color in self.colors: self.set_style(color, color) def find_latest_package(self, package): from poetry.version.version_selector import VersionSelector # find the latest version allowed in this pool if package.source_type == "git": return name = package.name selector = VersionSelector(self.poetry.pool) return selector.find_best_candidate(name, ">={}".format(package.pretty_version)) def get_update_status(self, latest, package): from poetry.semver import parse_constraint if latest.full_pretty_version == package.full_pretty_version: return "up-to-date" constraint = parse_constraint("^" + package.pretty_version) if latest.version and constraint.allows(latest.version): # It needs an immediate semver-compliant upgrade return "semver-safe-update" # it needs an upgrade but has potential BC breaks so is not urgent return "update-possible" def get_installed_status(self, locked, installed_repo): for package in installed_repo.packages: if locked.name == package.name: return "installed" return "not-installed" PK!?X!poetry/console/commands/update.pyfrom .venv_command import VenvCommand class UpdateCommand(VenvCommand): """ Update dependencies as according to the pyproject.toml file. update { packages?* : The packages to update. } { --no-dev : Do not install dev dependencies. } { --dry-run : Outputs the operations but will not execute anything (implicitly enables --verbose). 
} """ _loggers = ["poetry.repositories.pypi_repository"] def handle(self): from poetry.installation import Installer packages = self.argument("packages") installer = Installer( self.output, self.venv, self.poetry.package, self.poetry.locker, self.poetry.pool, ) if packages: installer.whitelist({name: "*" for name in packages}) installer.dev_mode(not self.option("no-dev")) installer.dry_run(self.option("dry-run")) # Force update installer.update(True) return installer.run() PK!cضOO'poetry/console/commands/venv_command.pyfrom .command import Command class VenvCommand(Command): def __init__(self): self._venv = None super(VenvCommand, self).__init__() def initialize(self, i, o): from poetry.utils.venv import Venv super(VenvCommand, self).initialize(i, o) self._venv = Venv.create( o, self.poetry.package.name, cwd=self.poetry.file.parent ) if self._venv.is_venv() and o.is_verbose(): o.writeln("Using virtualenv: {}".format(self._venv.venv)) @property def venv(self): return self._venv PK!< "poetry/console/commands/version.pyimport re from .command import Command class VersionCommand(Command): """ Bumps the version of the project. version { version=patch } """ help = """\ The version command bumps the version of the project and writes the new version back to pyproject.toml. The new version should ideally be a valid semver string or a valid bump rule: patch, minor, major, prepatch, preminor, premajor, prerelease. 
""" RESERVED = { "major", "minor", "patch", "premajor", "preminor", "prepatch", "prerelease", } def handle(self): version = self.argument("version") version = self.increment_version(self.poetry.package.pretty_version, version) self.line( "Bumping version from {} to {}".format( self.poetry.package.pretty_version, version ) ) content = self.poetry.file.read() poetry_content = content["tool"]["poetry"] poetry_content["version"] = version.text self.poetry.file.write(content) def increment_version(self, version, rule): from poetry.semver import Version try: version = Version.parse(version) except ValueError: raise ValueError("The project's version doesn't seem to follow semver") if rule in {"major", "premajor"}: new = version.next_major if rule == "premajor": new = new.first_prerelease elif rule in {"minor", "preminor"}: new = version.next_minor if rule == "preminor": new = new.first_prerelease elif rule in {"patch", "prepatch"}: new = version.next_patch if rule == "prepatch": new = new.first_prerelease elif rule == "prerelease": if version.is_prerelease(): pre = version.prerelease new_prerelease = int(pre[1]) + 1 new = Version.parse( "{}.{}.{}-{}".format( version.major, version.minor, version.patch, ".".join([pre[0], str(new_prerelease)]), ) ) else: new = version.next_patch.first_prerelease else: new = rule return new PK!!poetry/console/styles/__init__.pyPK!J<poetry/console/styles/poetry.pyfrom cleo.styles import CleoStyle from cleo.styles import OutputStyle class PoetryStyle(CleoStyle): def __init__(self, i, o): super(PoetryStyle, self).__init__(i, o) self.output.get_formatter().add_style("error", "red") self.output.get_formatter().add_style("warning", "yellow") self.output.get_formatter().add_style("question", "cyan") self.output.get_formatter().add_style("comment", "blue") def writeln( self, messages, type=OutputStyle.OUTPUT_NORMAL, verbosity=OutputStyle.VERBOSITY_NORMAL, ): if self.output.verbosity >= verbosity: super(PoetryStyle, self).writeln(messages, type=type) 
def write( self, messages, newline=False, type=OutputStyle.OUTPUT_NORMAL, verbosity=OutputStyle.VERBOSITY_NORMAL, ): if self.output.verbosity >= verbosity: super(PoetryStyle, self).write(messages, newline=newline, type=type) PK!=N2ccpoetry/exceptions.pyclass PoetryException(Exception): pass class InvalidProjectFile(PoetryException): pass PK!!!poetry/installation/__init__.pyfrom .installer import Installer PK!q %poetry/installation/base_installer.pyclass BaseInstaller: def install(self, package): raise NotImplementedError def update(self, source, target): raise NotImplementedError def remove(self, package): raise NotImplementedError PK!߁2F2F poetry/installation/installer.pyimport sys from typing import List from typing import Union from poetry.packages import Dependency from poetry.packages import Locker from poetry.packages import Package from poetry.packages.constraints.generic_constraint import GenericConstraint from poetry.puzzle import Solver from poetry.puzzle.operations import Install from poetry.puzzle.operations import Uninstall from poetry.puzzle.operations import Update from poetry.puzzle.operations.operation import Operation from poetry.repositories import Pool from poetry.repositories import Repository from poetry.repositories.installed_repository import InstalledRepository from poetry.semver import parse_constraint from poetry.semver import Version from poetry.utils.helpers import canonicalize_name from .base_installer import BaseInstaller from .pip_installer import PipInstaller class Installer: def __init__( self, io, venv, package, # type: Package locker, # type: Locker pool, # type: Pool installed=None, # type: (Union[InstalledRepository, None]) ): self._io = io self._venv = venv self._package = package self._locker = locker self._pool = pool self._dry_run = False self._update = False self._verbose = False self._write_lock = True self._dev_mode = True self._develop = [] self._execute_operations = True self._whitelist = {} self._extras = [] 
self._installer = self._get_installer() if installed is None: installed = self._get_installed() self._installed_repository = installed @property def installer(self): return self._installer def run(self): # Force update if there is no lock file present if not self._update and not self._locker.is_locked(): self._update = True if self.is_dry_run(): self.verbose(True) self._write_lock = False self._execute_operations = False local_repo = Repository() self._do_install(local_repo) return 0 def dry_run(self, dry_run=True): # type: (bool) -> Installer self._dry_run = dry_run return self def is_dry_run(self): # type: () -> bool return self._dry_run def verbose(self, verbose=True): # type: (bool) -> Installer self._verbose = verbose return self def is_verbose(self): # type: () -> bool return self._verbose def dev_mode(self, dev_mode=True): # type: (bool) -> Installer self._dev_mode = dev_mode return self def is_dev_mode(self): # type: () -> bool return self._dev_mode def develop(self, packages): # type: (dict) -> Installer self._develop = [canonicalize_name(p) for p in packages] return self def update(self, update=True): # type: (bool) -> Installer self._update = update return self def is_updating(self): # type: () -> bool return self._update def execute_operations(self, execute=True): # type: (bool) -> Installer self._execute_operations = execute return self def whitelist(self, packages): # type: (dict) -> Installer self._whitelist = [canonicalize_name(p) for p in packages] return self def extras(self, extras): # type: (list) -> Installer self._extras = extras return self def _do_install(self, local_repo): locked_repository = Repository() if self._update: if self._locker.is_locked() and self._whitelist: # If we update with a lock file present and # we have whitelisted packages (the ones we want to update) # we get the lock file packages to only update # what is strictly needed. # # Otherwise, the lock file information is irrelevant # since we want to update everything. 
locked_repository = self._locker.locked_repository(True) # Checking extras for extra in self._extras: if extra not in self._package.extras: raise ValueError("Extra [{}] is not specified.".format(extra)) self._io.writeln("Updating dependencies") solver = Solver( self._package, self._pool, self._installed_repository, locked_repository, self._io, ) ops = solver.solve(use_latest=self._whitelist) else: self._io.writeln("Installing dependencies from lock file") locked_repository = self._locker.locked_repository(True) if not self._locker.is_fresh(): self._io.writeln( "" "Warning: The lock file is not up to date with " "the latest changes in pyproject.toml. " "You may be getting outdated dependencies. " "Run update to update them." "" ) for extra in self._extras: if extra not in self._locker.lock_data.get("extras", {}): raise ValueError("Extra [{}] is not specified.".format(extra)) # If we are installing from lock # Filter the operations by comparing it with what is # currently installed ops = self._get_operations_from_lock(locked_repository) self._populate_local_repo(local_repo, ops, locked_repository) # We need to filter operations so that packages # not compatible with the current system, # or optional and not requested, are dropped self._filter_operations(ops, local_repo) self._io.new_line() # Execute operations actual_ops = [op for op in ops if not op.skipped] if not actual_ops and (self._execute_operations or self._dry_run): self._io.writeln("Nothing to install or update") if actual_ops and (self._execute_operations or self._dry_run): installs = [] updates = [] uninstalls = [] skipped = [] for op in ops: if op.skipped: skipped.append(op) continue if op.job_type == "install": installs.append( "{}:{}".format( op.package.pretty_name, op.package.full_pretty_version ) ) elif op.job_type == "update": updates.append( "{}:{}".format( op.target_package.pretty_name, op.target_package.full_pretty_version, ) ) elif op.job_type == "uninstall": 
uninstalls.append(op.package.pretty_name) self._io.new_line() self._io.writeln( "Package operations: " "{} install{}, " "{} update{}, " "{} removal{}" "{}".format( len(installs), "" if len(installs) == 1 else "s", len(updates), "" if len(updates) == 1 else "s", len(uninstalls), "" if len(uninstalls) == 1 else "s", ", {} skipped".format(len(skipped)) if skipped and self.is_verbose() else "", ) ) # Writing lock before installing if self._update and self._write_lock: updated_lock = self._locker.set_lock_data( self._package, local_repo.packages ) if updated_lock: self._io.writeln("") self._io.writeln("Writing lock file") self._io.writeln("") for op in ops: self._execute(op) def _execute(self, operation): # type: (Operation) -> None """ Execute a given operation. """ method = operation.job_type getattr(self, "_execute_{}".format(method))(operation) def _execute_install(self, operation): # type: (Install) -> None if operation.skipped: if self.is_verbose() and (self._execute_operations or self.is_dry_run()): self._io.writeln( " - Skipping {} ({}) {}".format( operation.package.pretty_name, operation.package.full_pretty_version, operation.skip_reason, ) ) return if self._execute_operations or self.is_dry_run(): self._io.writeln( " - Installing {} ({})".format( operation.package.pretty_name, operation.package.full_pretty_version ) ) if not self._execute_operations: return self._installer.install(operation.package) def _execute_update(self, operation): # type: (Update) -> None source = operation.initial_package target = operation.target_package if operation.skipped: if self.is_verbose() and (self._execute_operations or self.is_dry_run()): self._io.writeln( " - Skipping {} ({}) {}".format( target.pretty_name, target.full_pretty_version, operation.skip_reason, ) ) return if self._execute_operations or self.is_dry_run(): self._io.writeln( " - Updating {} ({} -> {})".format( target.pretty_name, source.full_pretty_version, target.full_pretty_version, ) ) if not 
self._execute_operations: return self._installer.update(source, target) def _execute_uninstall(self, operation): # type: (Uninstall) -> None if operation.skipped: if self.is_verbose() and (self._execute_operations or self.is_dry_run()): self._io.writeln( " - Not removing {} ({}) {}".format( operation.package.pretty_name, operation.package.full_pretty_version, operation.skip_reason, ) ) return if self._execute_operations or self.is_dry_run(): self._io.writeln( " - Removing {} ({})".format( operation.package.pretty_name, operation.package.full_pretty_version ) ) if not self._execute_operations: return self._installer.remove(operation.package) def _populate_local_repo(self, local_repo, ops, locked_repository): # Add all locked packages from the lock and go from there for package in locked_repository.packages: if not local_repo.has_package(package): local_repo.add_package(package) # Now, walk through all operations and add/remove/update accordingly for op in ops: if isinstance(op, Update): package = op.target_package else: package = op.package acted_on = False for pkg in local_repo.packages: if pkg.name == package.name: # The package we operate on is in the local repo if op.job_type == "update": if pkg.version == package.version: break local_repo.remove_package(pkg) local_repo.add_package(op.target_package) elif op.job_type == "uninstall": local_repo.remove_package(op.package) else: # Even though the package already exists # in the lock file we will prefer the new one # to force updates local_repo.remove_package(pkg) local_repo.add_package(package) acted_on = True if not acted_on: if not local_repo.has_package(package): local_repo.add_package(package) def _get_operations_from_lock( self, locked_repository # type: Repository ): # type: (...) 
-> List[Operation] installed_repo = self._installed_repository ops = [] extra_packages = [p.name for p in self._get_extra_packages(locked_repository)] for locked in locked_repository.packages: is_installed = False for installed in installed_repo.packages: if locked.name == installed.name: is_installed = True if locked.category == "dev" and not self.is_dev_mode(): ops.append(Uninstall(locked)) elif locked.optional and locked.name not in extra_packages: # Installed but optional and not requested in extras ops.append(Uninstall(locked)) elif locked.version != installed.version: ops.append(Update(installed, locked)) # If it's optional and not in required extras # we do not install if locked.optional and locked.name not in extra_packages: continue op = Install(locked) if is_installed: op.skip("Already installed") ops.append(op) return ops def _filter_operations( self, ops, repo ): # type: (List[Operation], Repository) -> None extra_packages = [p.name for p in self._get_extra_packages(repo)] for op in ops: if isinstance(op, Update): package = op.target_package else: package = op.package if op.job_type == "uninstall": continue if package.name in self._develop and package.source_type == "directory": package.develop = True if op.skipped: op.unskip() python = Version.parse( ".".join([str(i) for i in self._venv.version_info[:3]]) ) if "python" in package.requirements: python_constraint = parse_constraint(package.requirements["python"]) if not python_constraint.allows(python): # Incompatible python versions op.skip("Not needed for the current python version") continue if not package.python_constraint.allows(python): op.skip("Not needed for the current python version") continue if "platform" in package.requirements: platform_constraint = GenericConstraint.parse( package.requirements["platform"] ) if not platform_constraint.matches( GenericConstraint("=", sys.platform) ): # Incompatible systems op.skip("Not needed for the current platform") continue if self._update: extras = {} 
for extra, deps in self._package.extras.items(): extras[extra] = [dep.name for dep in deps] else: extras = {} for extra, deps in self._locker.lock_data.get("extras", {}).items(): extras[extra] = [dep.lower() for dep in deps] # If a package is optional and not requested # in any extra we skip it if package.optional: if package.name not in extra_packages: op.skip("Not required") # If the package is a dev package and dev packages # are not requests, we skip it if package.category == "dev" and not self.is_dev_mode(): op.skip("Dev dependencies not requested") def _get_extra_packages(self, repo): """ Returns all packages required by extras. Maybe we just let the solver handle it? """ if self._update: extras = {k: [d.name for d in v] for k, v in self._package.extras.items()} else: extras = self._locker.lock_data.get("extras", {}) extra_packages = [] for extra_name, packages in extras.items(): if extra_name not in self._extras: continue extra_packages += [Dependency(p, "*") for p in packages] def _extra_packages(packages): pkgs = [] for package in packages: for pkg in repo.packages: if pkg.name == package.name: pkgs.append(package) pkgs += _extra_packages(pkg.requires) break return pkgs return _extra_packages(extra_packages) def _get_installer(self): # type: () -> BaseInstaller return PipInstaller(self._venv, self._io) def _get_installed(self): # type: () -> InstalledRepository return InstalledRepository.load(self._venv) PK!umm%poetry/installation/noop_installer.pyfrom .base_installer import BaseInstaller class NoopInstaller(BaseInstaller): def __init__(self): self._installs = [] self._updates = [] self._removals = [] @property def installs(self): return self._installs @property def updates(self): return self._updates @property def removals(self): return self._removals def install(self, package): self._installs.append(package) def update(self, source, target): self._updates.append((source, target)) def remove(self, package): self._removals.append(package) PK!gmZJ 
$poetry/installation/pip_installer.pyimport os import tempfile from subprocess import CalledProcessError from poetry.utils._compat import encode from poetry.utils.venv import Venv from .base_installer import BaseInstaller class PipInstaller(BaseInstaller): def __init__(self, venv, io): # type: (Venv, ...) -> None self._venv = venv self._io = io def install(self, package, update=False): args = ["install", "--no-deps"] if package.source_type == "legacy" and package.source_url: args += ["--index-url", package.source_url] if update: args.append("-U") if package.hashes and not package.source_type: # Format as a requirements.txt # We need to create a requirements.txt file # for each package in order to check hashes. # This is far from optimal but we do not have any # other choice since this is the only way for pip # to verify hashes. req = self.create_temporary_requirement(package) args += ["-r", req] try: self.run(*args) finally: os.unlink(req) else: req = self.requirement(package) if not isinstance(req, list): args.append(req) else: args += req self.run(*args) def update(self, _, target): self.install(target, update=True) def remove(self, package): try: self.run("uninstall", package.name, "-y") except CalledProcessError as e: if "not installed" in str(e): return raise def run(self, *args, **kwargs): # type: (...) 
-> str return self._venv.run("pip", *args, **kwargs) def requirement(self, package, formatted=False): if formatted and not package.source_type: req = "{}=={}".format(package.name, package.version) for h in package.hashes: req += " --hash sha256:{}".format(h) req += "\n" return req if package.source_type in ["file", "directory"]: if package.root_dir: req = os.path.join(package.root_dir, package.source_url) else: req = os.path.realpath(package.source_url) if package.develop: req = ["-e", req] return req if package.source_type == "git": return "git+{}@{}#egg={}".format( package.source_url, package.source_reference, package.name ) return "{}=={}".format(package.name, package.version) def create_temporary_requirement(self, package): fd, name = tempfile.mkstemp( "reqs.txt", "{}-{}".format(package.name, package.version) ) try: os.write(fd, encode(self.requirement(package, formatted=True))) finally: os.close(fd) return name PK!Ypoetry/io/__init__.pyfrom .null_io import NullIO PK!}poetry/io/null_io.pyfrom cleo.inputs import ListInput from cleo.outputs import NullOutput from poetry.console.styles.poetry import PoetryStyle class NullIO(PoetryStyle): def __init__(self): super(NullIO, self).__init__(ListInput([]), NullOutput()) def is_quiet(self): # type: () -> bool return False def is_verbose(self): # type: () -> bool return False def is_very_verbose(self): # type: () -> bool return False def is_debug(self): # type: () -> bool return False def writeln(self, *args, **kwargs): pass def write(self, *args, **kwargs): pass def new_line(self, *args, **kwargs): pass PK!66poetry/io/raw_argv_input.pyimport sys from cleo.inputs import ArgvInput class RawArgvInput(ArgvInput): def parse(self): self._parsed = self._tokens while True: try: token = self._parsed.pop(0) except IndexError: break self.parse_argument(token) PK!poetry/json/__init__.pyPK!`--&poetry/json/schemas/poetry-schema.json{ "$schema": "http://json-schema.org/draft-04/schema#", "name": "Package", "type": "object", 
"additionalProperties": false, "required": [ "name", "version", "description" ], "properties": { "name": { "type": "string", "description": "Package name." }, "version": { "type": "string", "description": "Package version." }, "description": { "type": "string", "description": "Short package description." }, "keywords": { "type": "array", "items": { "type": "string", "description": "A tag/keyword that this package relates to." } }, "homepage": { "type": "string", "description": "Homepage URL for the project.", "format": "uri" }, "repository": { "type": "string", "description": "Repository URL for the project.", "format": "uri" }, "documentation": { "type": "string", "description": "Documentation URL for the project.", "format": "uri" }, "license": { "type": "string", "description": "License name." }, "authors": { "$ref": "#/definitions/authors" }, "readme": { "type": "string", "description": "The path to the README file" }, "classifiers": { "type": "array", "description": "A list of trove classifers." }, "dependencies": { "type": "object", "description": "This is a hash of package name (keys) and version constraints (values) that are required to run this package.", "required": ["python"], "properties": { "python": { "type": "string", "description": "The Python versions the package is compatible with." 
} }, "patternProperties": { "^[a-zA-Z-_.0-9]+$": { "oneOf": [ { "$ref": "#/definitions/dependency" }, { "$ref": "#/definitions/long-dependency" }, { "$ref": "#/definitions/git-dependency" }, { "$ref": "#/definitions/file-dependency" }, { "$ref": "#/definitions/path-dependency" } ] } }, "additionalProperties": false }, "dev-dependencies": { "type": "object", "description": "This is a hash of package name (keys) and version constraints (values) that this package requires for developing it (testing tools and such).", "patternProperties": { "^[a-zA-Z-_.0-9]+$": { "oneOf": [ { "$ref": "#/definitions/dependency" }, { "$ref": "#/definitions/long-dependency" }, { "$ref": "#/definitions/git-dependency" }, { "$ref": "#/definitions/file-dependency" } ] } }, "additionalProperties": false }, "extras": { "type": "object", "patternProperties": { "^[a-zA-Z-_.0-9]+$": { "type": "array", "items": { "type": "string" } } } }, "build": { "type": "string", "description": "The file used to build extensions." }, "source": { "type": "array", "description": "A set of additional repositories where packages can be found.", "additionalProperties": { "$ref": "#/definitions/repository" }, "items": { "$ref": "#/definitions/repository" } }, "scripts": { "type": "object", "description": "A hash of scripts to be installed.", "items": { "type": "string" } }, "plugins": { "type": "object", "description": "A hash of hashes representing plugins", "patternProperties": { "^[a-zA-Z-_.0-9]+$": { "type": "object", "patternProperties": { "^[a-zA-Z-_.0-9]+$": { "type": "string" } } } } } }, "definitions": { "authors": { "type": "array", "description": "List of authors that contributed to the package. This is typically the main maintainers, not the full list.", "items": { "type": "string" } }, "dependency": { "type": "string", "description": "The constraint of the dependency." 
}, "long-dependency": { "type": "object", "required": ["version"], "additionalProperties": false, "properties": { "version": { "type": "string", "description": "The constraint of the dependency." }, "python": { "type": "string", "description": "The python versions for which the dependency should be installed." }, "platform": { "type": "string", "description": "The platform(s) for which the dependency should be installed." }, "allows-prereleases": { "type": "boolean", "description": "Whether the dependency allows prereleases or not." }, "optional": { "type": "boolean", "description": "Whether the dependency is optional or not." }, "extras": { "type": "array", "description": "The required extras for this dependency.", "items": { "type": "string" } } } }, "git-dependency": { "type": "object", "required": ["git"], "additionalProperties": false, "properties": { "git": { "type": "string", "description": "The url of the git repository.", "format": "uri" }, "branch": { "type": "string", "description": "The branch to checkout." }, "tag": { "type": "string", "description": "The tag to checkout." }, "rev": { "type": "string", "description": "The revision to checkout." }, "python": { "type": "string", "description": "The python versions for which the dependency should be installed." }, "platform": { "type": "string", "description": "The platform(s) for which the dependency should be installed." }, "allows-prereleases": { "type": "boolean", "description": "Whether the dependency allows prereleases or not." }, "optional": { "type": "boolean", "description": "Whether the dependency is optional or not." }, "extras": { "type": "array", "description": "The required extras for this dependency.", "items": { "type": "string" } } } }, "file-dependency": { "type": "object", "required": ["file"], "additionalProperties": false, "properties": { "file": { "type": "string", "description": "The path to the file." 
}, "python": { "type": "string", "description": "The python versions for which the dependency should be installed." }, "platform": { "type": "string", "description": "The platform(s) for which the dependency should be installed." }, "optional": { "type": "boolean", "description": "Whether the dependency is optional or not." }, "extras": { "type": "array", "description": "The required extras for this dependency.", "items": { "type": "string" } } } }, "path-dependency": { "type": "object", "required": ["path"], "additionalProperties": false, "properties": { "path": { "type": "string", "description": "The path to the dependency." }, "python": { "type": "string", "description": "The python versions for which the dependency should be installed." }, "platform": { "type": "string", "description": "The platform(s) for which the dependency should be installed." }, "optional": { "type": "boolean", "description": "Whether the dependency is optional or not." }, "extras": { "type": "array", "description": "The required extras for this dependency.", "items": { "type": "string" } }, "develop": { "type": "boolean", "description": "Whether to install the dependency in development mode." } } }, "repository": { "type": "object", "properties": { "name": { "type": "string", "description": "The name of the repository" }, "url": { "type": "string", "description": "The url of the repository", "format": "uri" } } } } } PK!FFpoetry/layouts/__init__.pyfrom typing import Type from .layout import Layout from .src import SrcLayout from .standard import StandardLayout _LAYOUTS = {"src": SrcLayout, "standard": StandardLayout} def layout(name): # type: (str) -> Type[Layout] if name not in _LAYOUTS: raise ValueError("Invalid layout") return _LAYOUTS[name] PK! 
poetry/layouts/layout.pyfrom poetry.toml import dumps from poetry.toml import loads from poetry.utils.helpers import module_name TESTS_DEFAULT = u"""from {package_name} import __version__ def test_version(): assert __version__ == '{version}' """ POETRY_DEFAULT = """\ [tool.poetry] name = "" version = "" description = "" authors = [] [tool.poetry.dependencies] [tool.poetry.dev-dependencies] """ POETRY_WITH_LICENSE = """\ [tool.poetry] name = "" version = "" description = "" authors = [] license = "" [tool.poetry.dependencies] [tool.poetry.dev-dependencies] """ class Layout(object): def __init__( self, project, version="0.1.0", description="", readme_format="md", author=None, license=None, python="*", dependencies=None, dev_dependencies=None, ): self._project = project self._package_name = module_name(project) self._version = version self._description = description self._readme_format = readme_format self._license = license self._python = python self._dependencies = dependencies or {} self._dev_dependencies = dev_dependencies or {"pytest": "^3.5"} if not author: author = "Your Name " self._author = author def create(self, path, with_tests=True): path.mkdir(parents=True, exist_ok=True) self._create_default(path) self._create_readme(path) if with_tests: self._create_tests(path) self._write_poetry(path) def generate_poetry_content(self): template = POETRY_DEFAULT if self._license: template = POETRY_WITH_LICENSE content = loads(template) poetry_content = content["tool"]["poetry"] poetry_content["name"] = self._project poetry_content["version"] = self._version poetry_content["description"] = self._description poetry_content["authors"].append(self._author) if self._license: poetry_content["license"] = self._license poetry_content["dependencies"]["python"] = self._python for dep_name, dep_constraint in self._dependencies.items(): poetry_content["dependencies"][dep_name] = dep_constraint for dep_name, dep_constraint in self._dev_dependencies.items(): 
poetry_content["dev-dependencies"][dep_name] = dep_constraint return dumps(content) def _create_default(self, path, src=True): raise NotImplementedError() def _create_readme(self, path): if self._readme_format == "rst": readme_file = path / "README.rst" else: readme_file = path / "README.md" readme_file.touch() def _create_tests(self, path): self._dev_dependencies["pytest"] = "^3.0" tests = path / "tests" tests_init = tests / "__init__.py" tests_default = tests / "test_{}.py".format(self._package_name) tests.mkdir() tests_init.touch(exist_ok=False) with tests_default.open("w") as f: f.write( TESTS_DEFAULT.format( package_name=self._package_name, version=self._version ) ) def _write_poetry(self, path): content = self.generate_poetry_content() poetry = path / "pyproject.toml" with poetry.open("w") as f: f.write(content) PK! ѺUpoetry/layouts/src.py# -*- coding: utf-8 -*- from .layout import Layout DEFAULT = u"""__version__ = '{version}' """ class SrcLayout(Layout): def _create_default(self, path): package_path = path / "src" / self._package_name package_init = package_path / "__init__.py" package_path.mkdir(parents=True) with package_init.open("w") as f: f.write(DEFAULT.format(version=self._version)) PK!ѐpoetry/layouts/standard.py# -*- coding: utf-8 -*- from .layout import Layout DEFAULT = u"""__version__ = '{version}' """ class StandardLayout(Layout): def _create_default(self, path): package_path = path / self._package_name package_init = package_path / "__init__.py" package_path.mkdir() with package_init.open("w") as f: f.write(DEFAULT.format(version=self._version)) PK!B)&*poetry/locations.pyfrom .utils.appdirs import user_cache_dir from .utils.appdirs import user_config_dir CACHE_DIR = user_cache_dir("pypoetry") CONFIG_DIR = user_config_dir("pypoetry") PK!ΨDpoetry/masonry/__init__.py""" This module handles the packaging and publishing of python projects. 
A lot of the code used here has been taken from `flit `__ and adapted to work with the poetry codebase, so kudos to them for showing the way. """ from .builder import Builder PK!2poetry/masonry/api.py""" PEP-517 compliant buildsystem API """ import logging from pathlib import Path from poetry.poetry import Poetry from poetry.io import NullIO from poetry.utils.venv import Venv from .builders import SdistBuilder from .builders import WheelBuilder log = logging.getLogger(__name__) # PEP 517 specifies that the CWD will always be the source tree poetry = Poetry.create(".") def get_requires_for_build_wheel(config_settings=None): """ Returns a list of requirements for building, as strings """ main, extras = SdistBuilder.convert_dependencies(poetry.package.requires) return main + extras # For now, we require all dependencies to build either a wheel or an sdist. get_requires_for_build_sdist = get_requires_for_build_wheel def build_wheel(wheel_directory, config_settings=None, metadata_directory=None): """Builds a wheel, places it in wheel_directory""" info = WheelBuilder.make_in(poetry, NullIO(), Path(wheel_directory)) return info.file.name def build_sdist(sdist_directory, config_settings=None): """Builds an sdist, places it in sdist_directory""" path = SdistBuilder(poetry, Venv(), NullIO()).build(Path(sdist_directory)) return path.name PK!O366poetry/masonry/builder.pyfrom .builders import CompleteBuilder from .builders import SdistBuilder from .builders import WheelBuilder class Builder: _FORMATS = {"sdist": SdistBuilder, "wheel": WheelBuilder, "all": CompleteBuilder} def __init__(self, poetry, venv, io): self._poetry = poetry self._venv = venv self._io = io def build(self, fmt): if fmt not in self._FORMATS: raise ValueError("Invalid format: {}".format(fmt)) builder = self._FORMATS[fmt](self._poetry, self._venv, self._io) return builder.build() PK!i>$ff#poetry/masonry/builders/__init__.pyfrom .complete import CompleteBuilder from .sdist import SdistBuilder from .wheel 
import WheelBuilder PK!R&"poetry/masonry/builders/builder.py# -*- coding: utf-8 -*- import os import re import shutil import tempfile from collections import defaultdict from contextlib import contextmanager from poetry.utils._compat import Path from poetry.vcs import get_vcs from ..metadata import Metadata from ..utils.module import Module AUTHOR_REGEX = re.compile("(?u)^(?P[- .,\w\d'’\"()]+) <(?P.+?)>$") class Builder(object): AVAILABLE_PYTHONS = {"2", "2.7", "3", "3.4", "3.5", "3.6", "3.7"} def __init__(self, poetry, venv, io): self._poetry = poetry self._venv = venv self._io = io self._package = poetry.package self._path = poetry.file.parent self._module = Module(self._package.name, self._path.as_posix()) self._meta = Metadata.from_package(self._package) def build(self): raise NotImplementedError() def find_excluded_files(self): # type: () -> list # Checking VCS vcs = get_vcs(self._path) if not vcs: return [] ignored = vcs.get_ignored_files() result = [] for file in ignored: try: file = Path(file).absolute().relative_to(self._path) except ValueError: # Should only happen in tests continue result.append(file) return result def find_files_to_add(self, exclude_build=True): # type: () -> list """ Finds all files to add to the tarball TODO: Support explicit include/exclude """ excluded = self.find_excluded_files() src = self._module.path to_add = [] if not self._module.is_package(): if self._module.is_in_src(): to_add.append(src.relative_to(src.parent.parent)) else: to_add.append(src.relative_to(src.parent)) else: for root, dirs, files in os.walk(src.as_posix()): root = Path(root) if root.name == "__pycache__": continue for file in files: file = root / file file = file.relative_to(self._path) if file in excluded: continue if file.suffix == ".pyc": continue self._io.writeln( " - Adding: {}".format(str(file)), verbosity=self._io.VERBOSITY_VERY_VERBOSE, ) to_add.append(file) # Include project files self._io.writeln( " - Adding: pyproject.toml", 
verbosity=self._io.VERBOSITY_VERY_VERBOSE, ) to_add.append(Path("pyproject.toml")) # If a license file exists, add it for license_file in self._path.glob("LICENSE*"): self._io.writeln( " - Adding: {}".format( license_file.relative_to(self._path) ), verbosity=self._io.VERBOSITY_VERY_VERBOSE, ) to_add.append(license_file.relative_to(self._path)) # If a README is specificed we need to include it # to avoid errors if "readme" in self._poetry.local_config: readme = self._path / self._poetry.local_config["readme"] if readme.exists(): self._io.writeln( " - Adding: {}".format( readme.relative_to(self._path) ), verbosity=self._io.VERBOSITY_VERY_VERBOSE, ) to_add.append(readme.relative_to(self._path)) # If a build script is specified and explicitely required # we add it to the list of files if self._package.build and not exclude_build: to_add.append(Path(self._package.build)) return sorted(to_add) def convert_entry_points(self): # type: () -> dict result = defaultdict(list) # Scripts -> Entry points for name, ep in self._poetry.local_config.get("scripts", {}).items(): result["console_scripts"].append("{} = {}".format(name, ep)) # Plugins -> entry points plugins = self._poetry.local_config.get("plugins", {}) for groupname, group in plugins.items(): for name, ep in sorted(group.items()): result[groupname].append("{} = {}".format(name, ep)) for groupname in result: result[groupname] = sorted(result[groupname]) return dict(result) @classmethod def convert_author(cls, author): # type: () -> dict m = AUTHOR_REGEX.match(author) name = m.group("name") email = m.group("email") return {"name": name, "email": email} @classmethod @contextmanager def temporary_directory(cls, *args, **kwargs): try: from tempfile import TemporaryDirectory with TemporaryDirectory(*args, **kwargs) as name: yield name except ImportError: name = tempfile.mkdtemp(*args, **kwargs) yield name shutil.rmtree(name) PK!n[[#poetry/masonry/builders/complete.pyimport os import tarfile import poetry.poetry from 
contextlib import contextmanager from .builder import Builder from .sdist import SdistBuilder from .wheel import WheelBuilder class CompleteBuilder(Builder): def build(self): # We start by building the tarball # We will use it to build the wheel sdist_builder = SdistBuilder(self._poetry, self._venv, self._io) sdist_file = sdist_builder.build() self._io.writeln("") dist_dir = self._path / "dist" with self.unpacked_tarball(sdist_file) as tmpdir: WheelBuilder.make_in( poetry.poetry.Poetry.create(tmpdir), self._venv, self._io, dist_dir, original=self._poetry, ) @classmethod @contextmanager def unpacked_tarball(cls, path): tf = tarfile.open(str(path)) with cls.temporary_directory() as tmpdir: tf.extractall(tmpdir) files = os.listdir(tmpdir) assert len(files) == 1, files yield os.path.join(tmpdir, files[0]) PK!)/*/* poetry/masonry/builders/sdist.py# -*- coding: utf-8 -*- import os import re import tarfile from collections import defaultdict from copy import copy from gzip import GzipFile from io import BytesIO from posixpath import join as pjoin from pprint import pformat from typing import List from poetry.packages import Dependency from poetry.utils._compat import Path from poetry.utils._compat import encode from poetry.utils._compat import to_str from ..utils.helpers import normalize_file_permissions from .builder import Builder SETUP = """\ # -*- coding: utf-8 -*- from distutils.core import setup {before} setup_kwargs = {{ 'name': {name!r}, 'version': {version!r}, 'description': {description!r}, 'long_description': {long_description!r}, 'author': {author!r}, 'author_email': {author_email!r}, 'url': {url!r}, {extra} }} {after} setup(**setup_kwargs) """ PKG_INFO = """\ Metadata-Version: 2.1 Name: {name} Version: {version} Summary: {summary} Home-page: {home_page} Author: {author} Author-email: {author_email} """ class SdistBuilder(Builder): def build(self, target_dir=None): # type: (Path) -> Path self._io.writeln(" - Building sdist") if target_dir is None: target_dir = 
self._path / "dist" if not target_dir.exists(): target_dir.mkdir(parents=True) target = target_dir / "{}-{}.tar.gz".format( self._package.pretty_name, self._meta.version ) gz = GzipFile(target.as_posix(), mode="wb") tar = tarfile.TarFile( target.as_posix(), mode="w", fileobj=gz, format=tarfile.PAX_FORMAT ) try: tar_dir = "{}-{}".format(self._package.pretty_name, self._meta.version) files_to_add = self.find_files_to_add(exclude_build=False) for relpath in files_to_add: path = self._path / relpath tar_info = tar.gettarinfo( str(path), arcname=pjoin(tar_dir, str(relpath)) ) tar_info = self.clean_tarinfo(tar_info) if tar_info.isreg(): with path.open("rb") as f: tar.addfile(tar_info, f) else: tar.addfile(tar_info) # Symlinks & ? setup = self.build_setup() tar_info = tarfile.TarInfo(pjoin(tar_dir, "setup.py")) tar_info.size = len(setup) tar.addfile(tar_info, BytesIO(setup)) pkg_info = self.build_pkg_info() tar_info = tarfile.TarInfo(pjoin(tar_dir, "PKG-INFO")) tar_info.size = len(pkg_info) tar.addfile(tar_info, BytesIO(pkg_info)) finally: tar.close() gz.close() self._io.writeln(" - Built {}".format(target.name)) return target def build_setup(self): # type: () -> bytes before, extra, after = [], [], [] # If we have a build script, use it if self._package.build: after += [ "from {} import *".format(self._package.build.split(".")[0]), "build(setup_kwargs)", ] if self._module.is_in_src(): before.append("package_dir = \\\n{}\n".format(pformat({"": "src"}))) extra.append("'package_dir': package_dir,") if self._module.is_package(): packages, package_data = self.find_packages(self._module.path.as_posix()) before.append("packages = \\\n{}\n".format(pformat(sorted(packages)))) before.append("package_data = \\\n{}\n".format(pformat(package_data))) extra.append("'packages': packages,") extra.append("'package_data': package_data,") else: extra.append("'py_modules': {!r},".format(to_str(self._module.name))) dependencies, extras = self.convert_dependencies( self._package, 
self._package.requires ) if dependencies: before.append( "install_requires = \\\n{}\n".format(pformat(sorted(dependencies))) ) extra.append("'install_requires': install_requires,") if extras: before.append("extras_require = \\\n{}\n".format(pformat(extras))) extra.append("'extras_require': extras_require,") entry_points = self.convert_entry_points() if entry_points: before.append("entry_points = \\\n{}\n".format(pformat(entry_points))) extra.append("'entry_points': entry_points,") if self._package.python_versions != "*": python_requires = self._meta.requires_python extra.append("'python_requires': {!r},".format(python_requires)) return encode( SETUP.format( before="\n".join(before), name=to_str(self._meta.name), version=to_str(self._meta.version), description=to_str(self._meta.summary), long_description=to_str(self._meta.description), author=to_str(self._meta.author), author_email=to_str(self._meta.author_email), url=to_str(self._meta.home_page), extra="\n ".join(extra), after="\n".join(after), ) ) def build_pkg_info(self): pkg_info = PKG_INFO.format( name=self._meta.name, version=self._meta.version, summary=self._meta.summary, home_page=self._meta.home_page, author=to_str(self._meta.author), author_email=to_str(self._meta.author_email), ) if self._meta.keywords: pkg_info += "Keywords: {}\n".format(self._meta.keywords) if self._meta.requires_python: pkg_info += "Requires-Python: {}\n".format(self._meta.requires_python) for classifier in self._meta.classifiers: pkg_info += "Classifier: {}\n".format(classifier) for extra in sorted(self._meta.provides_extra): pkg_info += "Provides-Extra: {}\n".format(extra) for dep in sorted(self._meta.requires_dist): pkg_info += "Requires-Dist: {}\n".format(dep) return encode(pkg_info) @classmethod def find_packages(cls, path): """ Discover subpackages and data. 
It also retrieve necessary files """ pkgdir = os.path.normpath(path) pkg_name = os.path.basename(pkgdir) pkg_data = defaultdict(list) # Undocumented distutils feature: # the empty string matches all package names pkg_data[""].append("*") packages = [pkg_name] subpkg_paths = set() def find_nearest_pkg(rel_path): parts = rel_path.split(os.sep) for i in reversed(range(1, len(parts))): ancestor = "/".join(parts[:i]) if ancestor in subpkg_paths: pkg = ".".join([pkg_name] + parts[:i]) return pkg, "/".join(parts[i:]) # Relative to the top-level package return pkg_name, rel_path for path, dirnames, filenames in os.walk(pkgdir, topdown=True): if os.path.basename(path) == "__pycache__": continue from_top_level = os.path.relpath(path, pkgdir) if from_top_level == ".": continue is_subpkg = "__init__.py" in filenames if is_subpkg: subpkg_paths.add(from_top_level) parts = from_top_level.split(os.sep) packages.append(".".join([pkg_name] + parts)) else: pkg, from_nearest_pkg = find_nearest_pkg(from_top_level) pkg_data[pkg].append(pjoin(from_nearest_pkg, "*")) # Sort values in pkg_data pkg_data = {k: sorted(v) for (k, v) in pkg_data.items()} return sorted(packages), pkg_data @classmethod def convert_dependencies( cls, package, dependencies # type: Package # type: List[Dependency] ): main = [] extras = defaultdict(list) req_regex = re.compile("^(.+) \((.+)\)$") for dependency in dependencies: if dependency.is_optional(): for extra_name, reqs in package.extras.items(): for req in reqs: if req.name == dependency.name: requirement = to_str( dependency.to_pep_508(with_extras=False) ) if ";" in requirement: requirement, conditions = requirement.split(";") requirement = requirement.strip() if req_regex.match(requirement): requirement = req_regex.sub( "\\1\\2", requirement.strip() ) extras[extra_name + ":" + conditions.strip()].append( requirement ) continue requirement = requirement.strip() if req_regex.match(requirement): requirement = req_regex.sub( "\\1\\2", requirement.strip() ) 
extras[extra_name].append(requirement) continue requirement = to_str(dependency.to_pep_508()) if ";" in requirement: requirement, conditions = requirement.split(";") requirement = requirement.strip() if req_regex.match(requirement): requirement = req_regex.sub("\\1\\2", requirement.strip()) extras[":" + conditions.strip()].append(requirement) continue requirement = requirement.strip() if req_regex.match(requirement): requirement = req_regex.sub("\\1\\2", requirement.strip()) main.append(requirement) return main, dict(extras) @classmethod def clean_tarinfo(cls, tar_info): """ Clean metadata from a TarInfo object to make it more reproducible. - Set uid & gid to 0 - Set uname and gname to "" - Normalise permissions to 644 or 755 - Set mtime if not None """ ti = copy(tar_info) ti.uid = 0 ti.gid = 0 ti.uname = "" ti.gname = "" ti.mode = normalize_file_permissions(ti.mode) return ti PK!7X++ poetry/masonry/builders/wheel.pyfrom __future__ import unicode_literals import contextlib import hashlib import os import re import tempfile import shutil import stat import zipfile from base64 import urlsafe_b64encode from io import StringIO from poetry.__version__ import __version__ from poetry.semver import parse_constraint from poetry.utils._compat import Path from ..utils.helpers import normalize_file_permissions from ..utils.tags import get_abbr_impl from ..utils.tags import get_abi_tag from ..utils.tags import get_impl_ver from ..utils.tags import get_platform from .builder import Builder wheel_file_template = """\ Wheel-Version: 1.0 Generator: poetry {version} Root-Is-Purelib: {pure_lib} Tag: {tag} """ class WheelBuilder(Builder): def __init__(self, poetry, venv, io, target_fp, original=None): super(WheelBuilder, self).__init__(poetry, venv, io) self._records = [] self._original_path = self._path if original: self._original_path = original.file.parent # Open the zip file ready to write self._wheel_zip = zipfile.ZipFile( target_fp, "w", compression=zipfile.ZIP_DEFLATED ) 
@classmethod def make_in(cls, poetry, venv, io, directory, original=None): # We don't know the final filename until metadata is loaded, so write to # a temporary_file, and rename it afterwards. (fd, temp_path) = tempfile.mkstemp(suffix=".whl", dir=str(directory)) os.close(fd) try: with open(temp_path, "w+b") as fp: wb = WheelBuilder(poetry, venv, io, fp, original=original) wb.build() wheel_path = directory / wb.wheel_filename if wheel_path.exists(): os.unlink(str(wheel_path)) os.rename(temp_path, str(wheel_path)) except: os.unlink(temp_path) raise @classmethod def make(cls, poetry, venv, io): """Build a wheel in the dist/ directory, and optionally upload it. """ dist_dir = poetry.file.parent / "dist" try: dist_dir.mkdir() except FileExistsError: pass cls.make_in(poetry, venv, io, dist_dir) def build(self): self._io.writeln(" - Building wheel") try: self._build() self.copy_module() self.write_metadata() self.write_record() finally: self._wheel_zip.close() self._io.writeln(" - Built {}".format(self.wheel_filename)) def _build(self): if self._package.build: setup = self._path / "setup.py" # We need to place ourselves in the temporary # directory in order to build the package current_path = os.getcwd() try: os.chdir(str(self._path)) self._venv.run( "python", str(setup), "build", "-b", str(self._path / "build") ) finally: os.chdir(current_path) build_dir = self._path / "build" lib = list(build_dir.glob("lib.*")) if not lib: # The result of building the extensions # does not exist, this may due to conditional # builds, so we assume that it's okay return lib = lib[0] for pkg in lib.glob("*"): shutil.rmtree(str(self._path / pkg.name)) shutil.copytree(str(pkg), str(self._path / pkg.name)) def copy_module(self): if self._module.is_package(): files = self.find_files_to_add() # Walk the files and compress them, # sorting everything so the order is stable. 
for file in sorted(files): full_path = self._path / file if self._module.is_in_src(): try: file = file.relative_to( self._module.path.parent.relative_to(self._path) ) except ValueError: pass # Do not include topmost files if full_path.relative_to(self._path) == Path(file.name): continue self._add_file(full_path, file) else: self._add_file(str(self._module.path), self._module.path.name) def write_metadata(self): if ( "scripts" in self._poetry.local_config or "plugins" in self._poetry.local_config ): with self._write_to_zip(self.dist_info + "/entry_points.txt") as f: self._write_entry_points(f) for base in ("COPYING", "LICENSE"): for path in sorted(self._path.glob(base + "*")): self._add_file(path, "%s/%s" % (self.dist_info, path.name)) with self._write_to_zip(self.dist_info + "/WHEEL") as f: self._write_wheel_file(f) with self._write_to_zip(self.dist_info + "/METADATA") as f: self._write_metadata_file(f) def write_record(self): # Write a record of the files in the wheel with self._write_to_zip(self.dist_info + "/RECORD") as f: for path, hash, size in self._records: f.write("{},sha256={},{}\n".format(path, hash, size)) # RECORD itself is recorded with no hash or size f.write(self.dist_info + "/RECORD,,\n") def find_excluded_files(self): # type: () -> list # Checking VCS return [] @property def dist_info(self): # type: () -> str return self.dist_info_name(self._package.name, self._meta.version) @property def wheel_filename(self): # type: () -> str return "{}-{}-{}.whl".format( re.sub("[^\w\d.]+", "_", self._package.pretty_name, flags=re.UNICODE), re.sub("[^\w\d.]+", "_", self._meta.version, flags=re.UNICODE), self.tag, ) def supports_python2(self): return self._package.python_constraint.allows_any( parse_constraint(">=2.0.0 <3.0.0") ) def dist_info_name(self, distribution, version): # type: (...) 
-> str escaped_name = re.sub("[^\w\d.]+", "_", distribution, flags=re.UNICODE) escaped_version = re.sub("[^\w\d.]+", "_", version, flags=re.UNICODE) return "{}-{}.dist-info".format(escaped_name, escaped_version) @property def tag(self): if self._package.build: platform = get_platform().replace(".", "_").replace("-", "_") impl_name = get_abbr_impl(self._venv) impl_ver = get_impl_ver(self._venv) impl = impl_name + impl_ver abi_tag = str(get_abi_tag(self._venv)).lower() tag = (impl, abi_tag, platform) else: platform = "any" if self.supports_python2(): impl = "py2.py3" else: impl = "py3" tag = (impl, "none", platform) return "-".join(tag) def _add_file(self, full_path, rel_path): full_path, rel_path = str(full_path), str(rel_path) if os.sep != "/": # We always want to have /-separated paths in the zip file and in # RECORD rel_path = rel_path.replace(os.sep, "/") zinfo = zipfile.ZipInfo(rel_path) # Normalize permission bits to either 755 (executable) or 644 st_mode = os.stat(full_path).st_mode new_mode = normalize_file_permissions(st_mode) zinfo.external_attr = (new_mode & 0xFFFF) << 16 # Unix attributes if stat.S_ISDIR(st_mode): zinfo.external_attr |= 0x10 # MS-DOS directory flag hashsum = hashlib.sha256() with open(full_path, "rb") as src: while True: buf = src.read(1024 * 8) if not buf: break hashsum.update(buf) src.seek(0) self._wheel_zip.writestr(zinfo, src.read()) size = os.stat(full_path).st_size hash_digest = urlsafe_b64encode(hashsum.digest()).decode("ascii").rstrip("=") self._records.append((rel_path, hash_digest, size)) @contextlib.contextmanager def _write_to_zip(self, rel_path): sio = StringIO() yield sio # The default is a fixed timestamp rather than the current time, so # that building a wheel twice on the same computer can automatically # give you the exact same result. 
date_time = (2016, 1, 1, 0, 0, 0) zi = zipfile.ZipInfo(rel_path, date_time) b = sio.getvalue().encode("utf-8") hashsum = hashlib.sha256(b) hash_digest = urlsafe_b64encode(hashsum.digest()).decode("ascii").rstrip("=") self._wheel_zip.writestr(zi, b, compress_type=zipfile.ZIP_DEFLATED) self._records.append((rel_path, hash_digest, len(b))) def _write_entry_points(self, fp): """ Write entry_points.txt. """ entry_points = self.convert_entry_points() for group_name in sorted(entry_points): fp.write("[{}]\n".format(group_name)) for ep in sorted(entry_points[group_name]): fp.write(ep.replace(" ", "") + "\n") fp.write("\n") def _write_wheel_file(self, fp): fp.write( wheel_file_template.format( version=__version__, pure_lib="true" if self._package.build is None else "false", tag=self.tag, ) ) def _write_metadata_file(self, fp): """ Write out metadata in the 2.x format (email like) """ fp.write("Metadata-Version: 2.1\n") fp.write("Name: {}\n".format(self._meta.name)) fp.write("Version: {}\n".format(self._meta.version)) fp.write("Summary: {}\n".format(self._meta.summary)) fp.write("Home-page: {}\n".format(self._meta.home_page or "UNKNOWN")) fp.write("License: {}\n".format(self._meta.license or "UNKOWN")) # Optional fields if self._meta.keywords: fp.write("Keywords: {}\n".format(self._meta.keywords)) if self._meta.author: fp.write("Author: {}\n".format(self._meta.author)) if self._meta.author_email: fp.write("Author-email: {}\n".format(self._meta.author_email)) if self._meta.requires_python: fp.write("Requires-Python: {}\n".format(self._meta.requires_python)) for classifier in self._meta.classifiers: fp.write("Classifier: {}\n".format(classifier)) for extra in sorted(self._meta.provides_extra): fp.write("Provides-Extra: {}\n".format(extra)) for dep in sorted(self._meta.requires_dist): fp.write("Requires-Dist: {}\n".format(dep)) if self._meta.description_content_type: fp.write( "Description-Content-Type: " "{}\n".format(self._meta.description_content_type) ) if 
self._meta.description is not None: fp.write("\n" + self._meta.description + "\n") PK!J J poetry/masonry/metadata.pyfrom poetry.utils.helpers import canonicalize_name from poetry.utils.helpers import normalize_version from poetry.version.helpers import format_python_constraint class Metadata: metadata_version = "2.1" # version 1.0 name = None version = None platforms = () supported_platforms = () summary = None description = None keywords = None home_page = None download_url = None author = None author_email = None license = None # version 1.1 classifiers = () requires = () provides = () obsoletes = () # version 1.2 maintainer = None maintainer_email = None requires_python = None requires_external = () requires_dist = [] provides_dist = () obsoletes_dist = () project_urls = () # Version 2.1 description_content_type = None provides_extra = [] @classmethod def from_package(cls, package): # type: (...) -> Metadata meta = cls() meta.name = canonicalize_name(package.name) meta.version = normalize_version(package.version.text) meta.summary = package.description if package.readme: with package.readme.open() as f: meta.description = f.read() meta.keywords = ",".join(package.keywords) meta.home_page = package.homepage or package.repository_url meta.author = package.author_name meta.author_email = package.author_email if package.license: meta.license = package.license.id meta.classifiers = package.all_classifiers # Version 1.2 meta.maintainer = meta.author meta.maintainer_email = meta.author_email meta.requires_python = package.python_constraint meta.requires_dist = [d.to_pep_508() for d in package.requires] # Requires python meta.requires_python = format_python_constraint(package.python_constraint) # Version 2.1 if package.readme: if package.readme.suffix == ".rst": meta.description_content_type = "text/x-rst" elif package.readme.suffix in [".md", ".markdown"]: meta.description_content_type = "text/markdown" else: meta.description_content_type = "text/plain" 
meta.provides_extra = [e for e in package.extras] return meta PK!5k!!%poetry/masonry/publishing/__init__.pyfrom .publisher import Publisher PK!%8T T &poetry/masonry/publishing/publisher.pyimport toml from poetry.locations import CONFIG_DIR from poetry.utils._compat import Path from .uploader import Uploader class Publisher: """ Registers and publishes packages to remote repositories. """ def __init__(self, poetry, io): self._poetry = poetry self._package = poetry.package self._io = io self._uploader = Uploader(poetry, io) @property def files(self): return self._uploader.files def publish(self, repository_name, username, password): if repository_name: self._io.writeln( "Publishing {} ({}) " "to {}".format( self._package.pretty_name, self._package.pretty_version, repository_name, ) ) else: self._io.writeln( "Publishing {} ({}) " "to PyPI".format( self._package.pretty_name, self._package.pretty_version ) ) if not repository_name: url = "https://upload.pypi.org/legacy/" repository_name = "pypi" else: # Retrieving config information config_file = Path(CONFIG_DIR) / "config.toml" if not config_file.exists(): raise RuntimeError( "Config file does not exist. 
" "Unable to get repository information" ) with config_file.open() as f: config = toml.loads(f.read()) if ( "repositories" not in config or repository_name not in config["repositories"] ): raise RuntimeError( "Repository {} is not defined".format(repository_name) ) url = config["repositories"][repository_name]["url"] if not (username and password): auth_file = Path(CONFIG_DIR) / "auth.toml" if auth_file.exists(): with auth_file.open() as f: auth_config = toml.loads(f.read()) if ( "http-basic" in auth_config and repository_name in auth_config["http-basic"] ): config = auth_config["http-basic"][repository_name] username = config.get("username") password = config.get("password") # Requesting missing credentials if not username: username = self._io.ask("Username:") if not password: password = self._io.ask_hidden("Password:") # TODO: handle certificates self._uploader.auth(username, password) return self._uploader.upload(url) PK!I0"0"%poetry/masonry/publishing/uploader.pyimport hashlib import io import re from typing import List import requests from requests import adapters from requests.exceptions import HTTPError from requests.packages.urllib3 import util from requests_toolbelt import user_agent from requests_toolbelt.multipart import MultipartEncoder, MultipartEncoderMonitor from poetry.__version__ import __version__ from poetry.utils.helpers import normalize_version from ..metadata import Metadata wheel_file_re = re.compile( r"""^(?P(?P.+?)(-(?P\d.+?))?) ((-(?P\d.*?))?-(?P.+?)-(?P.+?)-(?P.+?) 
\.whl|\.dist-info)$""", re.VERBOSE, ) _has_blake2 = hasattr(hashlib, "blake2b") class Uploader: def __init__(self, poetry, io): self._poetry = poetry self._package = poetry.package self._io = io self._username = None self._password = None @property def user_agent(self): return user_agent("poetry", __version__) @property def adapter(self): retry = util.Retry( connect=5, total=10, method_whitelist=["GET"], status_forcelist=[500, 501, 502, 503], ) return adapters.HTTPAdapter(max_retries=retry) @property def files(self): # type: () -> List[str] dist = self._poetry.file.parent / "dist" version = normalize_version(self._package.version.text) wheels = list( dist.glob( "{}-{}-*.whl".format( re.sub( "[^\w\d.]+", "_", self._package.pretty_name, flags=re.UNICODE ), re.sub("[^\w\d.]+", "_", version, flags=re.UNICODE), ) ) ) tars = list( dist.glob("{}-{}.tar.gz".format(self._package.pretty_name, version)) ) return sorted(wheels + tars) def auth(self, username, password): self._username = username self._password = password def make_session(self): session = requests.session() if self.is_authenticated(): session.auth = (self._username, self._password) session.headers["User-Agent"] = self.user_agent for scheme in ("http://", "https://"): session.mount(scheme, self.adapter) return session def is_authenticated(self): return self._username is not None and self._password is not None def upload(self, url): session = self.make_session() try: self._upload(session, url) finally: session.close() def post_data(self, file): meta = Metadata.from_package(self._package) file_type = self._get_type(file) if _has_blake2: blake2_256_hash = hashlib.blake2b(digest_size=256 // 8) md5_hash = hashlib.md5() sha256_hash = hashlib.sha256() with file.open("rb") as fp: for content in iter(lambda: fp.read(io.DEFAULT_BUFFER_SIZE), b""): md5_hash.update(content) sha256_hash.update(content) if _has_blake2: blake2_256_hash.update(content) md5_digest = md5_hash.hexdigest() sha2_digest = sha256_hash.hexdigest() if 
_has_blake2: blake2_256_digest = blake2_256_hash.hexdigest() else: blake2_256_digest = None if file_type == "bdist_wheel": wheel_info = wheel_file_re.match(file.name) py_version = wheel_info.group("pyver") else: py_version = None data = { # identify release "name": meta.name, "version": meta.version, # file content "filetype": file_type, "pyversion": py_version, # additional meta-data "metadata_version": meta.metadata_version, "summary": meta.summary, "home_page": meta.home_page, "author": meta.author, "author_email": meta.author_email, "maintainer": meta.maintainer, "maintainer_email": meta.maintainer_email, "license": meta.license, "description": meta.description, "keywords": meta.keywords, "platform": meta.platforms, "classifiers": meta.classifiers, "download_url": meta.download_url, "supported_platform": meta.supported_platforms, "comment": None, "md5_digest": md5_digest, "sha256_digest": sha2_digest, "blake2_256_digest": blake2_256_digest, # PEP 314 "provides": meta.provides, "requires": meta.requires, "obsoletes": meta.obsoletes, # Metadata 1.2 "project_urls": meta.project_urls, "provides_dist": meta.provides_dist, "obsoletes_dist": meta.obsoletes_dist, "requires_dist": meta.requires_dist, "requires_external": meta.requires_external, "requires_python": meta.requires_python, } # Metadata 2.1 if meta.description_content_type: data["description_content_type"] = meta.description_content_type # TODO: Provides extra return data def _upload(self, session, url): try: self._do_upload(session, url) except HTTPError as e: if ( e.response.status_code not in (403, 400) or e.response.status_code == 400 and "was ever registered" not in e.response.text ): raise # It may be the first time we publish the package # We'll try to register it and go from there try: self._register(session, url) except HTTPError: raise def _do_upload(self, session, url): for file in self.files: # TODO: Check existence resp = self._upload_file(session, url, file) resp.raise_for_status() def 
_upload_file(self, session, url, file): data = self.post_data(file) data.update( { # action ":action": "file_upload", "protocol_version": "1", } ) data_to_send = self._prepare_data(data) with file.open("rb") as fp: data_to_send.append( ("content", (file.name, fp, "application/octet-stream")) ) encoder = MultipartEncoder(data_to_send) bar = self._io.create_progress_bar(encoder.len) bar.set_format( " - Uploading {0} %percent%%".format(file.name) ) monitor = MultipartEncoderMonitor( encoder, lambda monitor: bar.set_progress(monitor.bytes_read) ) bar.start() resp = session.post( url, data=monitor, allow_redirects=False, headers={"Content-Type": monitor.content_type}, ) if resp.ok: bar.finish() self._io.writeln("") else: self._io.overwrite("") return resp def _register(self, session, url): """ Register a package to a repository. """ dist = self._poetry.file.parent / "dist" file = dist / "{}-{}.tar.gz".format(self._package.name, self._package.version) if not file.exists(): raise RuntimeError('"{0}" does not exist.'.format(file.name)) data = self.post_data(file) data.update({":action": "submit", "protocol_version": "1"}) data_to_send = self._prepare_data(data) encoder = MultipartEncoder(data_to_send) resp = session.post( url, data=encoder, allow_redirects=False, headers={"Content-Type": encoder.content_type}, ) return resp def _prepare_data(self, data): data_to_send = [] for key, value in data.items(): if not isinstance(value, (list, tuple)): data_to_send.append((key, value)) else: for item in value: data_to_send.append((key, item)) return data_to_send def _get_type(self, file): exts = file.suffixes if exts[-1] == ".whl": return "bdist_wheel" elif len(exts) >= 2 and "".join(exts[-2:]) == ".tar.gz": return "sdist" raise ValueError("Unknown distribution format {}".format("".join(exts))) PK! 
poetry/masonry/utils/__init__.pyPK!ƨC88poetry/masonry/utils/helpers.pydef normalize_file_permissions(st_mode): """ Normalizes the permission bits in the st_mode field from stat to 644/755 Popular VCSs only track whether a file is executable or not. The exact permissions can vary on systems with different umasks. Normalising to 644 (non executable) or 755 (executable) makes builds more reproducible. """ # Set 644 permissions, leaving higher bits of st_mode unchanged new_mode = (st_mode | 0o644) & ~0o133 if st_mode & 0o100: new_mode |= 0o111 # Executable: 644 -> 755 return new_mode PK!,(Ϣpoetry/masonry/utils/module.pyfrom poetry.utils._compat import Path from poetry.utils.helpers import module_name class Module: def __init__(self, name, directory="."): self._name = module_name(name) self._in_src = False # It must exist either as a .py file or a directory, but not both pkg_dir = Path(directory, self._name) py_file = Path(directory, self._name + ".py") if pkg_dir.is_dir() and py_file.is_file(): raise ValueError("Both {} and {} exist".format(pkg_dir, py_file)) elif pkg_dir.is_dir(): self._path = pkg_dir self._is_package = True elif py_file.is_file(): self._path = py_file self._is_package = False else: # Searching for a src module src_pkg_dir = Path(directory, "src", self._name) src_py_file = Path(directory, "src", self._name + ".py") if src_pkg_dir.is_dir() and src_py_file.is_file(): raise ValueError("Both {} and {} exist".format(pkg_dir, py_file)) elif src_pkg_dir.is_dir(): self._in_src = True self._path = src_pkg_dir self._is_package = True elif src_py_file.is_file(): self._in_src = True self._path = src_py_file self._is_package = False else: raise ValueError("No file/folder found for package {}".format(name)) @property def name(self): # type: () -> str return self._name @property def path(self): # type: () -> Path return self._path @property def file(self): # type: () -> Path if self._is_package: return self._path / "__init__.py" else: return self._path def 
is_package(self): # type: () -> bool return self._is_package def is_in_src(self): # type: () -> bool return self._in_src PK!yP<<poetry/masonry/utils/tags.py""" Generate and work with PEP 425 Compatibility Tags. Base implementation taken from https://github.com/pypa/wheel/blob/master/wheel/pep425tags.py and adapted to work with poetry's venv util. """ from __future__ import unicode_literals import distutils.util import sys import warnings def get_abbr_impl(venv): """Return abbreviated implementation name.""" impl = venv.python_implementation if impl == "PyPy": return "pp" elif impl == "Jython": return "jy" elif impl == "IronPython": return "ip" elif impl == "CPython": return "cp" raise LookupError("Unknown Python implementation: " + impl) def get_impl_ver(venv): """Return implementation version.""" impl_ver = venv.config_var("py_version_nodot") if not impl_ver or get_abbr_impl(venv) == "pp": impl_ver = "".join(map(str, get_impl_version_info(venv))) return impl_ver def get_impl_version_info(venv): """Return sys.version_info-like tuple for use in decrementing the minor version.""" if get_abbr_impl(venv) == "pp": # as per https://github.com/pypa/pip/issues/2882 return venv.version_info[:3] else: return venv.version_info[:2] def get_flag(venv, var, fallback, expected=True, warn=True): """Use a fallback method for determining SOABI flags if the needed config var is unset or unavailable.""" val = venv.config_var(var) if val is None: if warn: warnings.warn( "Config variable '{0}' is unset, Python ABI tag may " "be incorrect".format(var), RuntimeWarning, 2, ) return fallback() return val == expected def get_abi_tag(venv): """Return the ABI tag based on SOABI (if available) or emulate SOABI (CPython 2, PyPy).""" soabi = venv.config_var("SOABI") impl = get_abbr_impl(venv) if not soabi and impl in ("cp", "pp") and hasattr(sys, "maxunicode"): d = "" m = "" u = "" if get_flag( venv, "Py_DEBUG", lambda: hasattr(sys, "gettotalrefcount"), warn=(impl == "cp"), ): d = "d" if 
get_flag(venv, "WITH_PYMALLOC", lambda: impl == "cp", warn=(impl == "cp")): m = "m" if get_flag( venv, "Py_UNICODE_SIZE", lambda: sys.maxunicode == 0x10ffff, expected=4, warn=(impl == "cp" and venv.version_info < (3, 3)), ) and venv.version_info < (3, 3): u = "u" abi = "%s%s%s%s%s" % (impl, get_impl_ver(venv), d, m, u) elif soabi and soabi.startswith("cpython-"): abi = "cp" + soabi.split("-")[1] elif soabi: abi = soabi.replace(".", "_").replace("-", "_") else: abi = None return abi def get_platform(): """Return our platform name 'win32', 'linux_x86_64'""" # XXX remove distutils dependency result = distutils.util.get_platform().replace(".", "_").replace("-", "_") if result == "linux_x86_64" and sys.maxsize == 2147483647: # pip pull request #3497 result = "linux_i686" return result def get_supported(venv, versions=None, supplied_platform=None): """Return a list of supported tags for each version specified in `versions`. :param versions: a list of string versions, of the form ["33", "32"], or None. The first version will be assumed to support our ABI. """ supported = [] # Versions must be given with respect to the preference if versions is None: versions = [] version_info = get_impl_version_info(venv) major = version_info[:-1] # Support all previous minor Python versions. 
for minor in range(version_info[-1], -1, -1): versions.append("".join(map(str, major + (minor,)))) impl = get_abbr_impl(venv) abis = [] abi = get_abi_tag(venv) if abi: abis[0:0] = [abi] abi3s = set() import imp for suffix in imp.get_suffixes(): if suffix[0].startswith(".abi"): abi3s.add(suffix[0].split(".", 2)[1]) abis.extend(sorted(list(abi3s))) abis.append("none") platforms = [] if supplied_platform: platforms.append(supplied_platform) platforms.append(get_platform()) # Current version, current API (built specifically for our Python): for abi in abis: for arch in platforms: supported.append(("%s%s" % (impl, versions[0]), abi, arch)) # abi3 modules compatible with older version of Python for version in versions[1:]: # abi3 was introduced in Python 3.2 if version in ("31", "30"): break for abi in abi3s: # empty set if not Python 3 for arch in platforms: supported.append(("%s%s" % (impl, version), abi, arch)) # No abi / arch, but requires our implementation: for i, version in enumerate(versions): supported.append(("%s%s" % (impl, version), "none", "any")) if i == 0: # Tagged specifically as being cross-version compatible # (with just the major version specified) supported.append(("%s%s" % (impl, versions[0][0]), "none", "any")) # Major Python version + platform; e.g. binaries not using the Python API supported.append(("py%s" % (versions[0][0]), "none", arch)) # No abi / arch, generic Python for i, version in enumerate(versions): supported.append(("py%s" % (version,), "none", "any")) if i == 0: supported.append(("py%s" % (version[0]), "none", "any")) return supported PK!poetry/mixology/__init__.pyfrom .version_solver import VersionSolver def resolve_version(root, provider, locked=None, use_latest=None): solver = VersionSolver(root, provider, locked=locked, use_latest=use_latest) with provider.progress(): return solver.solve() PK! 
Jpoetry/mixology/assignment.pyfrom typing import Any from .incompatibility import Incompatibility from .term import Term class Assignment(Term): """ A term in a PartialSolution that tracks some additional metadata. """ def __init__(self, dependency, is_positive, decision_level, index, cause=None): super(Assignment, self).__init__(dependency, is_positive) self._decision_level = decision_level self._index = index self._cause = cause @property def decision_level(self): # type: () -> int return self._decision_level @property def index(self): # type: () -> int return self._index @property def cause(self): # type: () -> Incompatibility return self._cause @classmethod def decision( cls, package, decision_level, index ): # type: (Any, int, int) -> Assignment return cls(package.to_dependency(), True, decision_level, index) @classmethod def derivation( cls, dependency, is_positive, cause, decision_level, index ): # type: (Any, bool, Incompatibility, int, int) -> Assignment return cls(dependency, is_positive, decision_level, index, cause) def is_decision(self): # type: () -> bool return self._cause is None PK!;{%%poetry/mixology/failure.pyfrom typing import Dict from typing import List from typing import Tuple from .incompatibility import Incompatibility from .incompatibility_cause import ConflictCause class SolveFailure(Exception): def __init__(self, incompatibility): # type: (Incompatibility) -> None assert incompatibility.terms[0].dependency.is_root self._incompatibility = incompatibility @property def message(self): return str(self) def __str__(self): return _Writer(self._incompatibility).write() class _Writer: def __init__(self, root): # type: (Incompatibility) -> None self._root = root self._derivations = {} # type: Dict[Incompatibility, int] self._lines = [] # type: List[Tuple[str, int]] self._line_numbers = {} # type: Dict[Incompatibility, int] self._count_derivations(self._root) def write(self): buffer = [] if isinstance(self._root.cause, ConflictCause): 
self._visit(self._root, {}) else: self._write( self._root, "Because {}, version solving failed.".format(self._root) ) padding = ( 0 if not self._line_numbers else len("({})".format(list(self._line_numbers.values())[-1])) ) last_was_empty = False for line in self._lines: message = line[0] if not message: if not last_was_empty: buffer.append("") last_was_empty = True continue last_was_empty = False number = line[-1] if number is not None: message = "({})".format(number).ljust(padding) + message else: message = " " * padding + message buffer.append(message) return "\n".join(buffer) def _write( self, incompatibility, message, numbered=False ): # type: (Incompatibility, str, bool) -> None if numbered: number = len(self._line_numbers) + 1 self._line_numbers[incompatibility] = number self._lines.append((message, number)) else: self._lines.append((message, None)) def _visit( self, incompatibility, details_for_incompatibility, conclusion=False ): # type: (Incompatibility, Dict, bool) -> None numbered = conclusion or self._derivations[incompatibility] > 1 conjunction = conclusion or ("So," if incompatibility == self._root else "And") incompatibility_string = str(incompatibility) cause = incompatibility.cause # type: ConflictCause details_for_cause = {} if isinstance(cause.conflict.cause, ConflictCause) and isinstance( cause.other.cause, ConflictCause ): conflict_line = self._line_numbers.get(cause.conflict) other_line = self._line_numbers.get(cause.other) if conflict_line is not None and other_line is not None: self._write( incompatibility, "Because {}, {}.".format( cause.conflict.and_to_string( cause.other, details_for_cause, conflict_line, other_line ), incompatibility_string, ), numbered=numbered, ) elif conflict_line is not None or other_line is not None: if conflict_line is not None: with_line = cause.conflict without_line = cause.other line = conflict_line else: with_line = cause.other without_line = cause.conflict line = other_line self._visit(without_line, 
details_for_cause) self._write( incompatibility, "{} because {} ({}), {}.".format( conjunction, str(with_line), line, incompatibility_string ), numbered=numbered, ) else: single_line_conflict = self._is_single_line(cause.conflict.cause) single_line_other = self._is_single_line(cause.other.cause) if single_line_other or single_line_conflict: first = cause.conflict if single_line_other else cause.other second = cause.other if single_line_other else cause.conflict self._visit(first, details_for_cause) self._visit(second, details_for_cause) self._write( incompatibility, "Thus, {}.".format(incompatibility_string), numbered=numbered, ) else: self._visit(cause.conflict, {}, conclusion=True) self._lines.append(("", None)) self._visit(cause.other, details_for_cause) self._write( incompatibility, "{} because {} ({}), {}".format( conjunction, str(cause.conflict), self._line_numbers[cause.conflict], incompatibility_string, ), numbered=numbered, ) elif isinstance(cause.conflict.cause, ConflictCause) or isinstance( cause.other.cause, ConflictCause ): derived = ( cause.conflict if isinstance(cause.conflict.cause, ConflictCause) else cause.other ) ext = ( cause.other if isinstance(cause.conflict.cause, ConflictCause) else cause.conflict ) derived_line = self._line_numbers.get(derived) if derived_line is not None: self._write( incompatibility, "Because {}, {}.".format( ext.and_to_string( derived, details_for_cause, None, derived_line ), incompatibility_string, ), numbered=numbered, ) elif self._is_collapsible(derived): derived_cause = derived.cause # type: ConflictCause if isinstance(derived_cause.conflict.cause, ConflictCause): collapsed_derived = derived_cause.conflict else: collapsed_derived = derived_cause.other if isinstance(derived_cause.conflict.cause, ConflictCause): collapsed_ext = derived_cause.other else: collapsed_ext = derived_cause.conflict details_for_cause = {} self._visit(collapsed_derived, details_for_cause) self._write( incompatibility, "{} because {}, 
{}.".format( conjunction, collapsed_ext.and_to_string(ext, details_for_cause, None, None), incompatibility_string, ), numbered=numbered, ) else: self._visit(derived, details_for_cause) self._write( incompatibility, "{} because {}, {}.".format( conjunction, str(ext), incompatibility_string ), numbered=numbered, ) else: self._write( incompatibility, "Because {}, {}.".format( cause.conflict.and_to_string( cause.other, details_for_cause, None, None ), incompatibility_string, ), numbered=numbered, ) def _is_collapsible(self, incompatibility): # type: (Incompatibility) -> bool if self._derivations[incompatibility] > 1: return False cause = incompatibility.cause # type: ConflictCause if isinstance(cause.conflict.cause, ConflictCause) and isinstance( cause.other.cause, ConflictCause ): return False if not isinstance(cause.conflict.cause, ConflictCause) and not isinstance( cause.other.cause, ConflictCause ): return False complex = ( cause.conflict if isinstance(cause.conflict.cause, ConflictCause) else cause.other ) return complex not in self._line_numbers def _is_single_line(self, cause): # type: (ConflictCause) -> bool return not isinstance(cause.conflict.cause, ConflictCause) and not isinstance( cause.other.cause, ConflictCause ) def _count_derivations(self, incompatibility): # type: (Incompatibility) -> None if incompatibility in self._derivations: self._derivations[incompatibility] += 1 else: self._derivations[incompatibility] = 1 cause = incompatibility.cause if isinstance(cause, ConflictCause): self._count_derivations(cause.conflict) self._count_derivations(cause.other) PK!!;;"poetry/mixology/incompatibility.pyfrom typing import Dict from typing import List from .incompatibility_cause import ConflictCause from .incompatibility_cause import DependencyCause from .incompatibility_cause import IncompatibilityCause from .incompatibility_cause import NoVersionsCause from .incompatibility_cause import PackageNotFoundCause from .incompatibility_cause import PlatformCause 
from .incompatibility_cause import PythonCause from .incompatibility_cause import RootCause from .term import Term class Incompatibility: def __init__( self, terms, cause ): # type: (List[Term], IncompatibilityCause) -> None # Remove the root package from generated incompatibilities, since it will # always be satisfied. This makes error reporting clearer, and may also # make solving more efficient. if ( len(terms) != 1 and isinstance(cause, ConflictCause) and any([term.is_positive() and term.dependency.is_root for term in terms]) ): terms = [ term for term in terms if not term.is_positive() or not term.dependency.is_root ] if ( len(terms) == 1 # Short-circuit in the common case of a two-term incompatibility with # two different packages (for example, a dependency). or len(terms) == 2 and terms[0].dependency.name != terms[-1].dependency.name ): pass else: # Coalesce multiple terms about the same package if possible. by_name = {} # type: Dict[str, Dict[str, Term]] for term in terms: if term.dependency.name not in by_name: by_name[term.dependency.name] = {} by_ref = by_name[term.dependency.name] ref = term.dependency.name if ref in by_ref: by_ref[ref] = by_ref[ref].intersect(term) # If we have two terms that refer to the same package but have a null # intersection, they're mutually exclusive, making this incompatibility # irrelevant, since we already know that mutually exclusive version # ranges are incompatible. We should never derive an irrelevant # incompatibility. 
assert by_ref[ref] is not None else: by_ref[ref] = term new_terms = [] for by_ref in by_name.values(): positive_terms = [ term for term in by_ref.values() if term.is_positive() ] if positive_terms: new_terms += positive_terms continue new_terms += list(by_ref.values()) terms = new_terms self._terms = terms self._cause = cause @property def terms(self): # type: () -> List[Term] return self._terms @property def cause(self): # type: () -> IncompatibilityCause return self._cause def is_failure(self): # type: () -> bool return len(self._terms) == 0 or ( len(self._terms) == 1 and self._terms[0].dependency.is_root ) def __str__(self): if isinstance(self._cause, DependencyCause): assert len(self._terms) == 2 depender = self._terms[0] dependee = self._terms[1] assert depender.is_positive() assert not dependee.is_positive() return "{} depends on {}".format( self._terse(depender, allow_every=True), self._terse(dependee) ) elif isinstance(self._cause, PythonCause): assert len(self._terms) == 1 assert self._terms[0].is_positive() cause = self._cause # type: PythonCause text = "{} requires ".format(self._terse(self._terms[0], allow_every=True)) text += "Python {}".format(cause.python_version) return text elif isinstance(self._cause, PlatformCause): assert len(self._terms) == 1 assert self._terms[0].is_positive() cause = self._cause # type: PlatformCause text = "{} requires ".format(self._terse(self._terms[0], allow_every=True)) text += "platform {}".format(cause.platform) return text elif isinstance(self._cause, NoVersionsCause): assert len(self._terms) == 1 assert self._terms[0].is_positive() return "no versions of {} match {}".format( self._terms[0].dependency.name, self._terms[0].constraint ) elif isinstance(self._cause, PackageNotFoundCause): assert len(self._terms) == 1 assert self._terms[0].is_positive() return "{} doesn't exist".format(self._terms[0].dependency.name) elif isinstance(self._cause, RootCause): assert len(self._terms) == 1 assert not 
self._terms[0].is_positive() assert self._terms[0].dependency.is_root return "{} is {}".format( self._terms[0].dependency.name, self._terms[0].dependency.constraint ) elif self.is_failure(): return "version solving failed" if len(self._terms) == 1: term = self._terms[0] if term.constraint.is_any(): return "{} is {}".format( term.dependency.name, "forbidden" if term.is_positive() else "required", ) else: return "{} is {}".format( term.dependency.name, "forbidden" if term.is_positive() else "required", ) if len(self._terms) == 2: term1 = self._terms[0] term2 = self._terms[1] if term1.is_positive() == term2.is_positive(): if term1.is_positive(): package1 = ( term1.dependency.name if term1.constraint.is_any() else self._terse(term1) ) package2 = ( term2.dependency.name if term2.constraint.is_any() else self._terse(term2) ) return "{} is incompatible with {}".format(package1, package2) else: return "either {} or {}".format( self._terse(term1), self._terse(term2) ) positive = [] negative = [] for term in self._terms: if term.is_positive(): positive.append(self._terse(term)) else: negative.append(self._terse(term)) if positive and negative: if len(positive) == 1: positive_term = [term for term in self._terms if term.is_positive()][0] return "{} requires {}".format( self._terse(positive_term, allow_every=True), " or ".join(negative) ) else: return "if {} then {}".format( " and ".join(positive), " or ".join(negative) ) elif positive: return "one of {} must be false".format(" or ".join(positive)) else: return "one of {} must be true".format(" or ".join(negative)) def and_to_string( self, other, details, this_line, other_line ): # type: (Incompatibility, dict, int, int) -> str requires_both = self._try_requires_both(other, details, this_line, other_line) if requires_both is not None: return requires_both requires_through = self._try_requires_through( other, details, this_line, other_line ) if requires_through is not None: return requires_through requires_forbidden = 
self._try_requires_forbidden( other, details, this_line, other_line ) if requires_forbidden is not None: return requires_forbidden buffer = [str(self)] if this_line is not None: buffer.append(" " + this_line) buffer.append(" and {}".format(str(other))) if other_line is not None: buffer.append(" " + other_line) return "\n".join(buffer) def _try_requires_both( self, other, details, this_line, other_line ): # type: (Incompatibility, dict, int, int) -> str if len(self._terms) == 1 or len(other.terms) == 1: return this_positive = self._single_term_where(lambda term: term.is_positive()) if this_positive is None: return other_positive = other._single_term_where(lambda term: term.is_positive()) if other_positive is None: return if this_positive.dependency != other_positive.dependency: return this_negatives = " or ".join( [self._terse(term) for term in self._terms if not term.is_positive()] ) other_negatives = " or ".join( [self._terse(term) for term in other.terms if not term.is_positive()] ) buffer = [self._terse(this_positive, allow_every=True) + " "] is_dependency = isinstance(self.cause, DependencyCause) and isinstance( other.cause, DependencyCause ) if is_dependency: buffer.append("depends on") else: buffer.append("requires") buffer.append(" both {}".format(this_negatives)) if this_line is not None: buffer.append(" ({})".format(this_line)) buffer.append(" and {}".format(other_negatives)) if other_line is not None: buffer.append(" ({})".format(other_line)) return "".join(buffer) def _try_requires_through( self, other, details, this_line, other_line ): # type: (Incompatibility, dict, int, int) -> str if len(self._terms) == 1 or len(other.terms) == 1: return this_negative = self._single_term_where(lambda term: not term.is_positive()) other_negative = other._single_term_where(lambda term: not term.is_positive()) if this_negative is None and other_negative is None: return this_positive = self._single_term_where(lambda term: term.is_positive()) other_positive = 
self._single_term_where(lambda term: term.is_positive()) if ( this_negative is not None and other_positive is not None and this_negative.dependency.name == other_positive.dependency.name and this_negative.inverse.satisfies(other_positive) ): prior = self prior_negative = this_negative prior_line = this_line latter = other latter_line = other_line elif ( other_negative is not None and this_positive is not None and other_negative.dependency.name == this_positive.dependency.name and other_negative.inverse.satisfies(this_positive) ): prior = other prior_negative = other_negative prior_line = other_line latter = self latter_line = this_line else: return prior_positives = [term for term in prior.terms if term.is_positive()] buffer = [] if len(prior_positives) > 1: prior_string = " or ".join([self._terse(term) for term in prior_positives]) buffer.append("if {} then ".format(prior_string)) else: if isinstance(prior.cause, DependencyCause): verb = "depends on" else: verb = "requires" buffer.append( "{} {} ".format(self._terse(prior_positives[0], allow_every=True), verb) ) buffer.append(self._terse(prior_negative)) if prior_line is not None: buffer.append(" ({})".format(prior_line)) buffer.append(" which ") if isinstance(latter.cause, DependencyCause): buffer.append("depends on ") else: buffer.append("requires ") buffer.append( " or ".join( [self._terse(term) for term in latter.terms if not term.is_positive()] ) ) if latter_line is not None: buffer.append(" ({})".format(latter_line)) return "".join(buffer) def _try_requires_forbidden( self, other, details, this_line, other_line ): # type: (Incompatibility, dict, int, int) -> str if len(self._terms) != 1 and len(other.terms) != 1: return None if len(self.terms) == 1: prior = other latter = self prior_line = other_line latter_line = this_line else: prior = self latter = other prior_line = this_line latter_line = other_line negative = prior._single_term_where(lambda term: not term.is_positive()) if negative is None: return if 
not negative.inverse.satisfies(latter.terms[0]): return positives = [t for t in prior.terms if t.is_positive()] buffer = [] if len(positives) > 1: prior_string = " or ".join([self._terse(term) for term in positives]) buffer.append("if {} then ".format(prior_string)) else: buffer.append(self._terse(positives[0], allow_every=True)) if isinstance(prior.cause, DependencyCause): buffer.append(" depends on ") else: buffer.append(" requires ") buffer.append(self._terse(latter.terms[0]) + " ") if prior_line is not None: buffer.append("({}) ".format(prior_line)) if isinstance(latter.cause, PythonCause): cause = latter.cause # type: PythonCause buffer.append("which requires Python {}".format(cause.python_version)) elif isinstance(latter.cause, NoVersionsCause): buffer.append("which doesn't match any versions") elif isinstance(latter.cause, PackageNotFoundCause): buffer.append("which doesn't exist") else: buffer.append("which is forbidden") if latter_line is not None: buffer.append(" ({})".format(latter_line)) return "".join(buffer) def _terse(self, term, allow_every=False): if allow_every and term.constraint.is_any(): return "every version of {}".format(term.dependency.name) return str(term.dependency) def _single_term_where(self, callable): # type: (callable) -> Term found = None for term in self._terms: if not callable(term): continue if found is not None: return found = term return found def __repr__(self): return "".format(str(self)) PK!ꗺ(poetry/mixology/incompatibility_cause.pyclass IncompatibilityCause(Exception): """ The reason and Incompatibility's terms are incompatible. """ class RootCause(IncompatibilityCause): pass class NoVersionsCause(IncompatibilityCause): pass class DependencyCause(IncompatibilityCause): pass class ConflictCause(IncompatibilityCause): """ The incompatibility was derived from two existing incompatibilities during conflict resolution. 
""" def __init__(self, conflict, other): self._conflict = conflict self._other = other @property def conflict(self): return self._conflict @property def other(self): return self._other def __str__(self): return str(self._conflict) class PythonCause(IncompatibilityCause): """ The incompatibility represents a package's python constraint (Python versions) being incompatible with the current python version. """ def __init__(self, python_version): self._python_version = python_version @property def python_version(self): return self._python_version class PlatformCause(IncompatibilityCause): """ The incompatibility represents a package's platform constraint (OS most likely) being incompatible with the current platform. """ def __init__(self, platform): self._platform = platform @property def platform(self): return self._platform class PackageNotFoundCause(IncompatibilityCause): """ The incompatibility represents a package that couldn't be found by its source. """ def __init__(self, error): self._error = error @property def error(self): return self._error PK!m0#poetry/mixology/partial_solution.pyfrom collections import OrderedDict from typing import Any from typing import Dict from typing import List from poetry.packages import Dependency from poetry.packages import Package from .assignment import Assignment from .incompatibility import Incompatibility from .set_relation import SetRelation from .term import Term class PartialSolution: """ # A list of Assignments that represent the solver's current best guess about # what's true for the eventual set of package versions that will comprise the # total solution. # # See https://github.com/dart-lang/mixology/tree/master/doc/solver.md#partial-solution. """ def __init__(self): # The assignments that have been made so far, in the order they were # assigned. self._assignments = [] # type: List[Assignment] # The decisions made for each package. 
self._decisions = OrderedDict() # type: Dict[str, Package] # The intersection of all positive Assignments for each package, minus any # negative Assignments that refer to that package. # # This is derived from self._assignments. self._positive = OrderedDict() # type: Dict[str, Term] # The union of all negative Assignments for each package. # # If a package has any positive Assignments, it doesn't appear in this # map. # # This is derived from self._assignments. self._negative = OrderedDict() # type: Dict[str, Dict[str, Term]] # The number of distinct solutions that have been attempted so far. self._attempted_solutions = 1 # Whether the solver is currently backtracking. self._backtracking = False @property def decisions(self): # type: () -> List[Package] return list(self._decisions.values()) @property def decision_level(self): # type: () -> int return len(self._decisions) @property def attempted_solutions(self): # type: () -> int return self._attempted_solutions @property def unsatisfied(self): # type: () -> List[Dependency] return [ term.dependency for term in self._positive.values() if term.dependency.name not in self._decisions ] def decide(self, package): # type: (Package) -> None """ Adds an assignment of package as a decision and increments the decision level. """ # When we make a new decision after backtracking, count an additional # attempted solution. If we backtrack multiple times in a row, though, we # only want to count one, since we haven't actually started attempting a # new solution. if self._backtracking: self._attempted_solutions += 1 self._backtracking = False self._decisions[package.name] = package self._assign( Assignment.decision(package, self.decision_level, len(self._assignments)) ) def derive( self, dependency, is_positive, cause ): # type: (Dependency, bool, Incompatibility) -> None """ Adds an assignment of package as a derivation. 
""" self._assign( Assignment.derivation( dependency, is_positive, cause, self.decision_level, len(self._assignments), ) ) def _assign(self, assignment): # type: (Assignment) -> None """ Adds an Assignment to _assignments and _positive or _negative. """ self._assignments.append(assignment) self._register(assignment) def backtrack(self, decision_level): # type: (int) -> None """ Resets the current decision level to decision_level, and removes all assignments made after that level. """ self._backtracking = True packages = set() while self._assignments[-1].decision_level > decision_level: removed = self._assignments.pop(-1) packages.add(removed.dependency.name) if removed.is_decision(): del self._decisions[removed.dependency.name] # Re-compute _positive and _negative for the packages that were removed. for package in packages: if package in self._positive: del self._positive[package] if package in self._negative: del self._negative[package] for assignment in self._assignments: if assignment.dependency.name in packages: self._register(assignment) def _register(self, assignment): # type: (Assignment) -> None """ Registers an Assignment in _positive or _negative. """ name = assignment.dependency.name old_positive = self._positive.get(name) if old_positive is not None: self._positive[name] = old_positive.intersect(assignment) return ref = assignment.dependency.name negative_by_ref = self._negative.get(name) old_negative = None if negative_by_ref is None else negative_by_ref.get(ref) if old_negative is None: term = assignment else: term = assignment.intersect(old_negative) if term.is_positive(): if name in self._negative: del self._negative[name] self._positive[name] = term else: if name not in self._negative: self._negative[name] = {} self._negative[name][ref] = term def satisfier(self, term): # type: (Term) -> Assignment """ Returns the first Assignment in this solution such that the sublist of assignments up to and including that entry collectively satisfies term. 
""" assigned_term = None # type: Term for assignment in self._assignments: if assignment.dependency.name != term.dependency.name: continue if ( not assignment.dependency.is_root and not assignment.dependency.name == term.dependency.name ): if not assignment.is_positive(): continue assert not term.is_positive() return assignment if assigned_term is None: assigned_term = assignment else: assigned_term = assigned_term.intersect(assignment) # As soon as we have enough assignments to satisfy term, return them. if assigned_term.satisfies(term): return assignment raise RuntimeError("[BUG] {} is not satisfied.".format(term)) def satisfies(self, term): # type: (Term) -> bool return self.relation(term) == SetRelation.SUBSET def relation(self, term): # type: (Term) -> int positive = self._positive.get(term.dependency.name) if positive is not None: return positive.relation(term) by_ref = self._negative.get(term.dependency.name) if by_ref is None: return SetRelation.OVERLAPPING negative = by_ref[term.dependency.name] if negative is None: return SetRelation.OVERLAPPING return negative.relation(term) PK!⟶eepoetry/mixology/result.pyclass SolverResult: def __init__(self, root, packages, attempted_solutions): self._root = root self._packages = packages self._attempted_solutions = attempted_solutions @property def packages(self): return self._packages @property def attempted_solutions(self): return self._attempted_solutions PK!:m poetry/mixology/set_relation.pyclass SetRelation: """ An enum of possible relationships between two sets. """ SUBSET = "subset" DISJOINT = "disjoint" OVERLAPPING = "overlapping" PK!]}((poetry/mixology/term.py# -*- coding: utf-8 -*- from typing import Union from poetry.packages import Dependency from .set_relation import SetRelation class Term(object): """ A statement about a package which is true or false for a given selection of package versions. See https://github.com/dart-lang/pub/tree/master/doc/solver.md#term. 
""" def __init__(self, dependency, is_positive): # type: Dependency # type: bool self._dependency = dependency self._positive = is_positive @property def inverse(self): # type: () -> Term return Term(self._dependency, not self.is_positive()) @property def dependency(self): return self._dependency @property def constraint(self): return self._dependency.constraint def is_positive(self): # type: () -> bool return self._positive def satisfies(self, other): # type: (Term) -> bool """ Returns whether this term satisfies another. """ return ( self.dependency.name == other.dependency.name and self.relation(other) == SetRelation.SUBSET ) def relation(self, other): # type: (Term) -> int """ Returns the relationship between the package versions allowed by this term and another. """ if self.dependency.name != other.dependency.name: raise ValueError( "{} should refer to {}".format(other, self.dependency.name) ) other_constraint = other.constraint if other.is_positive(): if self.is_positive(): if not self._compatible_dependency(other.dependency): return SetRelation.DISJOINT # foo ^1.5.0 is a subset of foo ^1.0.0 if other_constraint.allows_all(self.constraint): return SetRelation.SUBSET # foo ^2.0.0 is disjoint with foo ^1.0.0 if not self.constraint.allows_any(other_constraint): return SetRelation.DISJOINT return SetRelation.OVERLAPPING else: if not self._compatible_dependency(other.dependency): return SetRelation.OVERLAPPING # not foo ^1.0.0 is disjoint with foo ^1.5.0 if self.constraint.allows_all(other_constraint): return SetRelation.DISJOINT # not foo ^1.5.0 overlaps foo ^1.0.0 # not foo ^2.0.0 is a superset of foo ^1.5.0 return SetRelation.OVERLAPPING else: if self.is_positive(): if not self._compatible_dependency(other.dependency): return SetRelation.SUBSET # foo ^2.0.0 is a subset of not foo ^1.0.0 if not other_constraint.allows_any(self.constraint): return SetRelation.SUBSET # foo ^1.5.0 is disjoint with not foo ^1.0.0 if other_constraint.allows_all(self.constraint): 
return SetRelation.DISJOINT # foo ^1.0.0 overlaps not foo ^1.5.0 return SetRelation.OVERLAPPING else: if not self._compatible_dependency(other.dependency): return SetRelation.OVERLAPPING # not foo ^1.0.0 is a subset of not foo ^1.5.0 if self.constraint.allows_all(other_constraint): return SetRelation.SUBSET # not foo ^2.0.0 overlaps not foo ^1.0.0 # not foo ^1.5.0 is a superset of not foo ^1.0.0 return SetRelation.OVERLAPPING def intersect(self, other): # type: (Term) -> Union[Term, None] """ Returns a Term that represents the packages allowed by both this term and another """ if self.dependency.name != other.dependency.name: raise ValueError( "{} should refer to {}".format(other, self.dependency.name) ) if self._compatible_dependency(other.dependency): if self.is_positive() != other.is_positive(): # foo ^1.0.0 ∩ not foo ^1.5.0 → foo >=1.0.0 <1.5.0 positive = self if self.is_positive() else other negative = other if self.is_positive() else self return self._non_empty_term( positive.constraint.difference(negative.constraint), True ) elif self.is_positive(): # foo ^1.0.0 ∩ foo >=1.5.0 <3.0.0 → foo ^1.5.0 return self._non_empty_term( self.constraint.intersect(other.constraint), True ) else: # not foo ^1.0.0 ∩ not foo >=1.5.0 <3.0.0 → not foo >=1.0.0 <3.0.0 return self._non_empty_term( self.constraint.union(other.constraint), False ) elif self.is_positive() != other.is_positive(): return self if self.is_positive() else other else: return def difference(self, other): # type: (Term) -> Term """ Returns a Term that represents packages allowed by this term and not by the other """ return self.intersect(other.inverse) def _compatible_dependency(self, other): return ( self.dependency.is_root or other.is_root or (other.name == self.dependency.name) ) def _non_empty_term(self, constraint, is_positive): if constraint.is_empty(): return return Term(Dependency(self.dependency.name, constraint), is_positive) def __str__(self): return "{}{}".format("not " if not self.is_positive() 
else "", self._dependency) def __repr__(self): return "".format(str(self)) PK!EE!poetry/mixology/version_solver.py# -*- coding: utf-8 -*- import time from typing import Dict from typing import List from typing import Union from poetry.packages import Dependency from poetry.packages import ProjectPackage from poetry.packages import Package from poetry.puzzle.provider import Provider from poetry.semver import Version from poetry.semver import VersionRange from .failure import SolveFailure from .incompatibility import Incompatibility from .incompatibility_cause import ConflictCause from .incompatibility_cause import NoVersionsCause from .incompatibility_cause import PackageNotFoundCause from .incompatibility_cause import RootCause from .partial_solution import PartialSolution from .result import SolverResult from .set_relation import SetRelation from .term import Term _conflict = object() class VersionSolver: """ The version solver that finds a set of package versions that satisfy the root package's dependencies. See https://github.com/dart-lang/pub/tree/master/doc/solver.md for details on how this solver works. """ def __init__( self, root, # type: ProjectPackage provider, # type: Provider locked=None, # type: Dict[str, Package] use_latest=None, # type: List[str] ): self._root = root self._provider = provider self._locked = locked or {} if use_latest is None: use_latest = [] self._use_latest = use_latest self._incompatibilities = {} # type: Dict[str, List[Incompatibility]] self._solution = PartialSolution() @property def solution(self): # type: () -> PartialSolution return self._solution def solve(self): # type: () -> SolverResult """ Finds a set of dependencies that match the root package's constraints, or raises an error if no such set is available. 
""" start = time.time() root_dependency = Dependency(self._root.name, self._root.version) root_dependency.is_root = True self._add_incompatibility( Incompatibility([Term(root_dependency, False)], RootCause()) ) try: next = self._root.name while next is not None: self._propagate(next) next = self._choose_package_version() return self._result() except Exception: raise finally: self._log( "Version solving took {:.3f} seconds.\n" "Tried {} solutions.".format( time.time() - start, self._solution.attempted_solutions ) ) def _propagate(self, package): # type: (str) -> None """ Performs unit propagation on incompatibilities transitively related to package to derive new assignments for _solution. """ changed = set() changed.add(package) while changed: package = changed.pop() # Iterate in reverse because conflict resolution tends to produce more # general incompatibilities as time goes on. If we look at those first, # we can derive stronger assignments sooner and more eagerly find # conflicts. for incompatibility in reversed(self._incompatibilities[package]): result = self._propagate_incompatibility(incompatibility) if result is _conflict: # If the incompatibility is satisfied by the solution, we use # _resolve_conflict() to determine the root cause of the conflict as a # new incompatibility. # # It also backjumps to a point in the solution # where that incompatibility will allow us to derive new assignments # that avoid the conflict. root_cause = self._resolve_conflict(incompatibility) # Back jumping erases all the assignments we did at the previous # decision level, so we clear [changed] and refill it with the # newly-propagated assignment. 
changed.clear() changed.add(str(self._propagate_incompatibility(root_cause))) break elif result is not None: changed.add(result) def _propagate_incompatibility( self, incompatibility ): # type: (Incompatibility) -> Union[str, _conflict, None] """ If incompatibility is almost satisfied by _solution, adds the negation of the unsatisfied term to _solution. If incompatibility is satisfied by _solution, returns _conflict. If incompatibility is almost satisfied by _solution, returns the unsatisfied term's package name. Otherwise, returns None. """ # The first entry in incompatibility.terms that's not yet satisfied by # _solution, if one exists. If we find more than one, _solution is # inconclusive for incompatibility and we can't deduce anything. unsatisfied = None for term in incompatibility.terms: relation = self._solution.relation(term) if relation == SetRelation.DISJOINT: # If term is already contradicted by _solution, then # incompatibility is contradicted as well and there's nothing new we # can deduce from it. return elif relation == SetRelation.OVERLAPPING: # If more than one term is inconclusive, we can't deduce anything about # incompatibility. if unsatisfied is not None: return # If exactly one term in incompatibility is inconclusive, then it's # almost satisfied and [term] is the unsatisfied term. We can add the # inverse of the term to _solution. unsatisfied = term # If *all* terms in incompatibility are satisfied by _solution, then # incompatibility is satisfied and we have a conflict. 
if unsatisfied is None: return _conflict self._log( "derived: {}{}".format( "not " if unsatisfied.is_positive() else "", unsatisfied.dependency ) ) self._solution.derive( unsatisfied.dependency, not unsatisfied.is_positive(), incompatibility ) return unsatisfied.dependency.name def _resolve_conflict( self, incompatibility ): # type: (Incompatibility) -> Incompatibility """ Given an incompatibility that's satisfied by _solution, The `conflict resolution`_ constructs a new incompatibility that encapsulates the root cause of the conflict and backtracks _solution until the new incompatibility will allow _propagate() to deduce new assignments. Adds the new incompatibility to _incompatibilities and returns it. .. _conflict resolution: https://github.com/dart-lang/pub/tree/master/doc/solver.md#conflict-resolution """ self._log("conflict: {}".format(incompatibility)) new_incompatibility = False while not incompatibility.is_failure(): # The term in incompatibility.terms that was most recently satisfied by # _solution. most_recent_term = None # The earliest assignment in _solution such that incompatibility is # satisfied by _solution up to and including this assignment. most_recent_satisfier = None # The difference between most_recent_satisfier and most_recent_term; # that is, the versions that are allowed by most_recent_satisfier and not # by most_recent_term. This is None if most_recent_satisfier totally # satisfies most_recent_term. difference = None # The decision level of the earliest assignment in _solution *before* # most_recent_satisfier such that incompatibility is satisfied by # _solution up to and including this assignment plus # most_recent_satisfier. # # Decision level 1 is the level where the root package was selected. It's # safe to go back to decision level 0, but stopping at 1 tends to produce # better error messages, because references to the root package end up # closer to the final conclusion that no solution exists. 
previous_satisfier_level = 1 for term in incompatibility.terms: satisfier = self._solution.satisfier(term) if most_recent_satisfier is None: most_recent_term = term most_recent_satisfier = satisfier elif most_recent_satisfier.index < satisfier.index: previous_satisfier_level = max( previous_satisfier_level, most_recent_satisfier.decision_level ) most_recent_term = term most_recent_satisfier = satisfier difference = None else: previous_satisfier_level = max( previous_satisfier_level, satisfier.decision_level ) if most_recent_term == term: # If most_recent_satisfier doesn't satisfy most_recent_term on its # own, then the next-most-recent satisfier may be the one that # satisfies the remainder. difference = most_recent_satisfier.difference(most_recent_term) if difference is not None: previous_satisfier_level = max( previous_satisfier_level, self._solution.satisfier(difference.inverse).decision_level, ) # If most_recent_identifier is the only satisfier left at its decision # level, or if it has no cause (indicating that it's a decision rather # than a derivation), then incompatibility is the root cause. We then # backjump to previous_satisfier_level, where incompatibility is # guaranteed to allow _propagate to produce more assignments. if ( previous_satisfier_level < most_recent_satisfier.decision_level or most_recent_satisfier.cause is None ): self._solution.backtrack(previous_satisfier_level) if new_incompatibility: self._add_incompatibility(incompatibility) return incompatibility # Create a new incompatibility by combining incompatibility with the # incompatibility that caused most_recent_satisfier to be assigned. Doing # this iteratively constructs an incompatibility that's guaranteed to be # true (that is, we know for sure no solution will satisfy the # incompatibility) while also approximating the intuitive notion of the # "root cause" of the conflict. 
new_terms = [] for term in incompatibility.terms: if term != most_recent_term: new_terms.append(term) for term in most_recent_satisfier.cause.terms: if term.dependency != most_recent_satisfier.dependency: new_terms.append(term) # The most_recent_satisfier may not satisfy most_recent_term on its own # if there are a collection of constraints on most_recent_term that # only satisfy it together. For example, if most_recent_term is # `foo ^1.0.0` and _solution contains `[foo >=1.0.0, # foo <2.0.0]`, then most_recent_satisfier will be `foo <2.0.0` even # though it doesn't totally satisfy `foo ^1.0.0`. # # In this case, we add `not (most_recent_satisfier \ most_recent_term)` to # the incompatibility as well, See the `algorithm documentation`_ for # details. # # .. _algorithm documentation: https://github.com/dart-lang/pub/tree/master/doc/solver.md#conflict-resolution if difference is not None: new_terms.append(difference.inverse) incompatibility = Incompatibility( new_terms, ConflictCause(incompatibility, most_recent_satisfier.cause) ) new_incompatibility = True partially = "" if difference is None else " partially" bang = "!" self._log( "{} {} is{} satisfied by {}".format( bang, most_recent_term, partially, most_recent_satisfier ) ) self._log( '{} which is caused by "{}"'.format(bang, most_recent_satisfier.cause) ) self._log("{} thus: {}".format(bang, incompatibility)) raise SolveFailure(incompatibility) def _choose_package_version(self): # type: () -> Union[str, None] """ Tries to select a version of a required package. Returns the name of the package whose incompatibilities should be propagated by _propagate(), or None indicating that version solving is complete and a solution has been found. """ unsatisfied = self._solution.unsatisfied if not unsatisfied: return # Prefer packages with as few remaining versions as possible, # so that if a conflict is necessary it's forced quickly. 
def _get_min(dependency): if dependency.name in self._use_latest: # If we're forced to use the latest version of a package, it effectively # only has one version to choose from. return 1 if dependency.name in self._locked: return 1 try: return len(self._provider.search_for(dependency)) except ValueError: return 0 if len(unsatisfied) == 1: dependency = unsatisfied[0] else: dependency = min(*unsatisfied, key=_get_min) locked = self._get_locked(dependency.name) if locked is None or not dependency.constraint.allows(locked.version): try: packages = self._provider.search_for(dependency) except ValueError as e: self._add_incompatibility( Incompatibility([Term(dependency, True)], PackageNotFoundCause(e)) ) return dependency.name try: version = packages[0] except IndexError: version = None else: version = locked if version is None: # If there are no versions that satisfy the constraint, # add an incompatibility that indicates that. self._add_incompatibility( Incompatibility([Term(dependency, True)], NoVersionsCause()) ) return dependency.name conflict = False for incompatibility in self._provider.incompatibilities_for(version): self._add_incompatibility(incompatibility) # If an incompatibility is already satisfied, then selecting version # would cause a conflict. # # We'll continue adding its dependencies, then go back to # unit propagation which will guide us to choose a better version. 
conflict = conflict or all( [ term.dependency.name == dependency.name or self._solution.satisfies(term) for term in incompatibility.terms ] ) if not conflict: self._solution.decide(version) self._log( "selecting {} ({})".format( version.name, version.full_pretty_version ) ) return dependency.name def _excludes_single_version(self, constraint): # type: (Any) -> bool return isinstance(VersionRange().difference(constraint), Version) def _result(self): # type: () -> SolverResult """ Creates a #SolverResult from the decisions in _solution """ decisions = self._solution.decisions return SolverResult( self._root, [p for p in decisions if not p.is_root()], self._solution.attempted_solutions, ) def _add_incompatibility(self, incompatibility): # type: (Incompatibility) -> None self._log("fact: {}".format(incompatibility)) for term in incompatibility.terms: if term.dependency.name not in self._incompatibilities: self._incompatibilities[term.dependency.name] = [] if incompatibility in self._incompatibilities[term.dependency.name]: continue self._incompatibilities[term.dependency.name].append(incompatibility) def _get_locked(self, package_name): # type: (str) -> Union[Package, None] if package_name in self._use_latest: return locked = self._locked.get(package_name) if not locked: return for dep in self._root.all_requires: if dep.name == locked.name: locked.requires_extras = dep.extras return locked def _log(self, text): self._provider.debug(text, self._solution.attempted_solutions) PK!rRpoetry/packages/__init__.pyimport os import re from poetry.version.requirements import Requirement from .dependency import Dependency from .directory_dependency import DirectoryDependency from .file_dependency import FileDependency from .locker import Locker from .package import Package from .project_package import ProjectPackage from .utils.link import Link from .utils.utils import convert_markers from .utils.utils import group_markers from .utils.utils import is_archive_file from .utils.utils 
import is_installable_dir from .utils.utils import is_url from .utils.utils import path_to_url from .utils.utils import strip_extras from .vcs_dependency import VCSDependency def dependency_from_pep_508(name): # Removing comments parts = name.split("#", 1) name = parts[0].strip() if len(parts) > 1: rest = parts[1] if ";" in rest: name += ";" + rest.split(";", 1)[1] req = Requirement(name) if req.marker: markers = convert_markers(req.marker.markers) else: markers = {} name = req.name path = os.path.normpath(os.path.abspath(name)) link = None if is_url(name): link = Link(name) else: p, extras = strip_extras(path) if os.path.isdir(p) and (os.path.sep in name or name.startswith(".")): if not is_installable_dir(p): raise ValueError( "Directory {!r} is not installable. File 'setup.py' " "not found.".format(name) ) link = Link(path_to_url(p)) elif is_archive_file(p): link = Link(path_to_url(p)) # it's a local file, dir, or url if link: # Handle relative file URLs if link.scheme == "file" and re.search(r"\.\./", link.url): link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path)))) # wheel file if link.is_wheel: m = re.match("^(?P(?P.+?)-(?P\d.*?))", link.filename) if not m: raise ValueError("Invalid wheel name: {}".format(link.filename)) name = m.group("name") version = m.group("ver") dep = Dependency(name, version) else: name = link.egg_fragment if link.scheme == "git": dep = VCSDependency(name, "git", link.url_without_fragment) else: dep = Dependency(name, "*") else: if req.pretty_constraint: constraint = req.constraint else: constraint = "*" dep = Dependency(name, constraint) if "extra" in markers: # If we have extras, the dependency is optional dep.deactivate() for or_ in markers["extra"]: for _, extra in or_: dep.extras.append(extra) if "python_version" in markers: ors = [] for or_ in markers["python_version"]: ands = [] for op, version in or_: # Expand python version if op == "==": version = "~" + version op = "" elif op == "!=": version += ".*" elif op 
== "in": versions = [] for v in re.split("[ ,]+", version): split = v.split(".") if len(split) in [1, 2]: split.append("*") op = "" else: op = "==" versions.append(op + ".".join(split)) if versions: ands.append(" || ".join(versions)) continue ands.append("{}{}".format(op, version)) ors.append(" ".join(ands)) dep.python_versions = " || ".join(ors) if "sys_platform" in markers: ors = [] for or_ in markers["sys_platform"]: ands = [] for op, platform in or_: if op == "==": op = "" elif op == "in": platforms = [] for v in re.split("[ ,]+", platform): platforms.append(v) if platforms: ands.append(" || ".join(platforms)) continue ands.append("{}{}".format(op, platform)) ors.append(" ".join(ands)) dep.platform = " || ".join(ors) # Extras for extra in req.extras: dep.extras.append(extra) return dep PK!'poetry/packages/constraints/__init__.pyPK!252.poetry/packages/constraints/base_constraint.pyclass BaseConstraint(object): def matches(self, provider): raise NotImplementedError() def allows_all(self, other): raise NotImplementedError() def allows_any(self, other): raise NotImplementedError() def difference(self, other): raise NotImplementedError() def intersect(self, other): raise NotImplementedError() def is_empty(self): return False PK!q/poetry/packages/constraints/empty_constraint.pyfrom .base_constraint import BaseConstraint class EmptyConstraint(BaseConstraint): pretty_string = None def matches(self, _): return True def is_empty(self): return True def allows_all(self, other): return True def allows_any(self, other): return True def intersect(self, other): return other def difference(self, other): return def __str__(self): return "*" PK!"31poetry/packages/constraints/generic_constraint.pyimport operator import re from .base_constraint import BaseConstraint from .empty_constraint import EmptyConstraint from .multi_constraint import MultiConstraint class GenericConstraint(BaseConstraint): """ Represents a generic constraint. 
This is particularly useful for platform/system/os/extra constraints. """ OP_EQ = operator.eq OP_NE = operator.ne _trans_op_str = {"=": OP_EQ, "==": OP_EQ, "!=": OP_NE} _trans_op_int = {OP_EQ: "==", OP_NE: "!="} def __init__(self, operator, version): if operator not in self._trans_op_str: raise ValueError( 'Invalid operator "{}" given, ' "expected one of: {}".format( operator, ", ".join(self.supported_operators) ) ) self._operator = self._trans_op_str[operator] self._string_operator = self._trans_op_int[self._operator] self._version = version @property def supported_operators(self): return list(self._trans_op_str.keys()) @property def operator(self): return self._operator @property def string_operator(self): return self._string_operator @property def version(self): return self._version def matches(self, provider): if not isinstance(provider, GenericConstraint): return provider.matches(self) is_equal_op = self.OP_EQ is self._operator is_non_equal_op = self.OP_NE is self._operator is_provider_equal_op = self.OP_EQ is provider.operator is_provider_non_equal_op = self.OP_NE is provider.operator if ( is_equal_op and is_provider_equal_op or is_non_equal_op and is_provider_non_equal_op ): return self._version == provider.version if ( is_equal_op and is_provider_non_equal_op or is_non_equal_op and is_provider_equal_op ): return self._version != provider.version return False @classmethod def parse(cls, constraints): """ Parses a constraint string into MultiConstraint and/or PlatformConstraint objects. """ pretty_constraint = constraints or_constraints = re.split("\s*\|\|?\s*", constraints.strip()) or_groups = [] for constraints in or_constraints: and_constraints = re.split( "(? 
1: constraint_objects = [] for constraint in and_constraints: for parsed_constraint in cls._parse_constraint(constraint): constraint_objects.append(parsed_constraint) else: constraint_objects = cls._parse_constraint(and_constraints[0]) if len(constraint_objects) == 1: constraint = constraint_objects[0] else: constraint = MultiConstraint(constraint_objects) or_groups.append(constraint) if len(or_groups) == 1: constraint = or_groups[0] else: constraint = MultiConstraint(or_groups, False) constraint.pretty_string = pretty_constraint return constraint @classmethod def _parse_constraint(cls, constraint): m = re.match("(?i)^v?[xX*](\.[xX*])*$", constraint) if m: return (EmptyConstraint(),) # Basic Comparators m = re.match("^(!=|==?)?\s*(.*)", constraint) if m: return (GenericConstraint(m.group(1) or "=", m.group(2)),) raise ValueError("Could not parse generic constraint: {}".format(constraint)) def __str__(self): op = self._trans_op_int[self._operator] if op == "==": op = "" else: op = op + " " return "{}{}".format(op, self._version) def __repr__(self): return "".format(str(self)) PK!a/poetry/packages/constraints/multi_constraint.pyfrom .base_constraint import BaseConstraint class MultiConstraint(BaseConstraint): def __init__(self, constraints, conjunctive=True): self._constraints = tuple(constraints) self._conjunctive = conjunctive @property def constraints(self): return self._constraints def is_conjunctive(self): return self._conjunctive def is_disjunctive(self): return not self._conjunctive def matches(self, provider): if self.is_disjunctive(): for constraint in self._constraints: if constraint.matches(provider): return True return False for constraint in self._constraints: if not constraint.matches(provider): return False return True def __str__(self): constraints = [] for constraint in self._constraints: constraints.append(str(constraint)) return "{}".format((", " if self._conjunctive else " || ").join(constraints)) 
PK!-EE2poetry/packages/constraints/wildcard_constraint.pyimport re from .constraint import Constraint class WilcardConstraint(Constraint): def __init__(self, constraint): # type: (str) -> None m = re.match( "^(!= ?|==)?v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.[xX*])+$", constraint ) if not m: raise ValueError("Invalid value for wildcard constraint") if not m.group(1): operator = "==" else: operator = m.group(1).strip() super(WilcardConstraint, self).__init__( operator, ".".join([g if g else "*" for g in m.groups()[1:]]) ) if m.group(4): position = 2 elif m.group(3): position = 1 else: position = 0 from ..version_parser import VersionParser parser = VersionParser() groups = m.groups()[1:] low_version = parser._manipulate_version_string(groups, position) high_version = parser._manipulate_version_string(groups, position, 1) if operator == "!=": if low_version == "0.0.0.0": self._constraint = Constraint(">=", high_version) else: self._constraint = parser.parse_constraints( "<{} || >={}".format(low_version, high_version) ) else: if low_version == "0.0.0.0": self._constraint = Constraint("<", high_version) else: self._constraint = parser.parse_constraints( ">={},<{}".format(low_version, high_version) ) @property def supported_operators(self): return ["!=", "=="] @property def constraint(self): return self._constraint def matches(self, provider): # type: (Constraint) -> bool if isinstance(provider, self.__class__): return self._constraint.matches(provider.constraint) return provider.matches(self._constraint) def __str__(self): op = "" if self.string_operator == "!=": op = "!= " return "{}{}".format(op, self._version) PK!2-t""poetry/packages/dependency.pyimport poetry.packages from poetry.semver import parse_constraint from poetry.semver import Version from poetry.semver import VersionConstraint from poetry.semver import VersionUnion from poetry.utils.helpers import canonicalize_name from .constraints.empty_constraint import EmptyConstraint from .constraints.generic_constraint 
import GenericConstraint from .constraints.multi_constraint import MultiConstraint class Dependency(object): def __init__( self, name, # type: str constraint, # type: str optional=False, # type: bool category="main", # type: str allows_prereleases=False, # type: bool ): self._name = canonicalize_name(name) self._pretty_name = name try: if not isinstance(constraint, VersionConstraint): self._constraint = parse_constraint(constraint) else: self._constraint = constraint except ValueError: self._constraint = parse_constraint("*") self._pretty_constraint = str(constraint) self._optional = optional self._category = category self._allows_prereleases = allows_prereleases self._python_versions = "*" self._python_constraint = parse_constraint("*") self._platform = "*" self._platform_constraint = EmptyConstraint() self._extras = [] self._in_extras = [] self._activated = not self._optional self.is_root = False @property def name(self): return self._name @property def constraint(self): return self._constraint @property def pretty_constraint(self): return self._pretty_constraint @property def pretty_name(self): return self._pretty_name @property def category(self): return self._category @property def python_versions(self): return self._python_versions @python_versions.setter def python_versions(self, value): self._python_versions = value self._python_constraint = parse_constraint(value) @property def python_constraint(self): return self._python_constraint @property def platform(self): return self._platform @platform.setter def platform(self, value): self._platform = value self._platform_constraint = GenericConstraint.parse(value) @property def platform_constraint(self): return self._platform_constraint @property def extras(self): # type: () -> list return self._extras @property def in_extras(self): # type: () -> list return self._in_extras def allows_prereleases(self): return self._allows_prereleases def is_optional(self): return self._optional def is_activated(self): return 
self._activated def is_vcs(self): return False def is_file(self): return False def is_directory(self): return False def accepts(self, package): # type: (poetry.packages.Package) -> bool """ Determines if the given package matches this dependency. """ return ( self._name == package.name and self._constraint.allows(package.version) and (not package.is_prerelease() or self.allows_prereleases()) ) def to_pep_508(self, with_extras=True): # type: (bool) -> str requirement = self.pretty_name if self.extras: requirement += "[{}]".format(",".join(self.extras)) if isinstance(self.constraint, VersionUnion): requirement += " ({})".format( ",".join([str(c).replace(" ", "") for c in self.constraint.ranges]) ) elif isinstance(self.constraint, Version): requirement += " (=={})".format(self.constraint.text) elif not self.constraint.is_any(): requirement += " ({})".format(str(self.constraint).replace(" ", "")) # Markers markers = [] # Python marker if self.python_versions != "*": python_constraint = self.python_constraint markers.append( self._create_nested_marker("python_version", python_constraint) ) if self.platform != "*": platform_constraint = self.platform_constraint markers.append( self._create_nested_marker("sys_platform", platform_constraint) ) in_extras = " || ".join(self._in_extras) if in_extras and with_extras: markers.append( self._create_nested_marker("extra", GenericConstraint.parse(in_extras)) ) if markers: if len(markers) > 1: markers = ["({})".format(m) for m in markers] requirement += "; {}".format(" and ".join(markers)) else: requirement += "; {}".format(markers[0]) return requirement def _create_nested_marker(self, name, constraint): if isinstance(constraint, MultiConstraint): parts = [] for c in constraint.constraints: multi = False if isinstance(c, MultiConstraint): multi = True parts.append((multi, self._create_nested_marker(name, c))) glue = " and " if constraint.is_disjunctive(): parts = [ "({})".format(part[1]) if part[0] else part[1] for part in parts ] 
glue = " or " else: parts = [part[1] for part in parts] marker = glue.join(parts) elif isinstance(constraint, GenericConstraint): marker = '{} {} "{}"'.format( name, constraint.string_operator, constraint.version ) elif isinstance(constraint, VersionUnion): parts = [] for c in constraint.ranges: parts.append(self._create_nested_marker(name, c)) glue = " or " parts = ["({})".format(part) for part in parts] marker = glue.join(parts) elif isinstance(constraint, Version): marker = '{} == "{}"'.format(name, constraint.text) else: if constraint.min is not None: op = ">=" if not constraint.include_min: op = ">" version = constraint.min.text if constraint.max is not None: text = '{} {} "{}"'.format(name, op, version) op = "<=" if not constraint.include_max: op = "<" version = constraint.max text += ' and {} {} "{}"'.format(name, op, version) return text elif constraint.max is not None: op = "<=" if not constraint.include_max: op = "<" version = constraint.max else: return "" marker = '{} {} "{}"'.format(name, op, version) return marker def activate(self): """ Set the dependency as mandatory. """ self._activated = True def deactivate(self): """ Set the dependency as optional. 
""" if not self._optional: self._optional = True self._activated = False def with_constraint(self, constraint): new = Dependency( self.pretty_name, constraint, optional=self.is_optional(), category=self.category, allows_prereleases=self.allows_prereleases(), ) new.is_root = self.is_root new.python_versions = self.python_versions new.platform = self.platform for extra in self.extras: new.extras.append(extra) for in_extra in self.in_extras: new.in_extras.append(in_extra) return new def __eq__(self, other): if not isinstance(other, Dependency): return NotImplemented return self._name == other.name and self._constraint == other.constraint def __ne__(self, other): return not self == other def __hash__(self): return hash((self._name, self._pretty_constraint)) def __str__(self): if self.is_root: return self._pretty_name return "{} ({})".format(self._pretty_name, self._pretty_constraint) def __repr__(self): return "<{} {}>".format(self.__class__.__name__, str(self)) PK!rjii'poetry/packages/directory_dependency.pyimport os import pkginfo from pkginfo.distribution import HEADER_ATTRS from pkginfo.distribution import HEADER_ATTRS_2_0 from poetry.io import NullIO from poetry.utils._compat import Path from poetry.utils._compat import decode from poetry.utils.helpers import parse_requires from poetry.utils.toml_file import TomlFile from poetry.utils.venv import NullVenv from poetry.utils.venv import Venv from .dependency import Dependency # Patching pkginfo to support Metadata version 2.1 (PEP 566) HEADER_ATTRS.update( {"2.1": HEADER_ATTRS_2_0 + (("Provides-Extra", "provides_extra", True),)} ) class DirectoryDependency(Dependency): def __init__( self, path, # type: Path category="main", # type: str optional=False, # type: bool base=None, # type: Path develop=False, # type: bool ): from . 
import dependency_from_pep_508 from .package import Package self._path = path self._base = base self._full_path = path self._develop = develop if self._base and not self._path.is_absolute(): self._full_path = self._base / self._path if not self._full_path.exists(): raise ValueError("Directory {} does not exist".format(self._path)) if self._full_path.is_file(): raise ValueError("{} is a file, expected a directory".format(self._path)) # Checking content to dertermine actions setup = self._full_path / "setup.py" pyproject = TomlFile(self._full_path / "pyproject.toml") has_poetry = False if pyproject.exists(): pyproject_content = pyproject.read(True) has_poetry = ( "tool" in pyproject_content and "poetry" in pyproject_content["tool"] ) if not setup.exists() and not has_poetry: raise ValueError( "Directory {} does not seem to be a Python package".format( self._full_path ) ) if has_poetry: from poetry.masonry.builders import SdistBuilder from poetry.poetry import Poetry poetry = Poetry.create(self._full_path) builder = SdistBuilder(poetry, NullVenv(), NullIO()) with setup.open("w") as f: f.write(decode(builder.build_setup())) package = poetry.package self._package = Package(package.pretty_name, package.version) self._package.requires += package.requires self._package.dev_requires += package.dev_requires self._package.python_versions = package.python_versions self._package.platform = package.platform else: # Execute egg_info current_dir = os.getcwd() os.chdir(str(self._full_path)) try: cwd = base venv = Venv.create(NullIO(), cwd=cwd) venv.run("python", "setup.py", "egg_info") finally: os.chdir(current_dir) egg_info = list(self._full_path.glob("*.egg-info"))[0] meta = pkginfo.UnpackedSDist(str(egg_info)) if meta.requires_dist: reqs = list(meta.requires_dist) else: reqs = [] requires = egg_info / "requires.txt" if requires.exists(): with requires.open() as f: reqs = parse_requires(f.read()) package = Package(meta.name, meta.version) package.description = meta.summary for 
req in reqs: package.requires.append(dependency_from_pep_508(req)) if meta.requires_python: package.python_versions = meta.requires_python if meta.platforms: platforms = [p for p in meta.platforms if p.lower() != "unknown"] if platforms: package.platform = " || ".join(platforms) self._package = package self._package.source_type = "directory" self._package.source_url = str(self._path) super(DirectoryDependency, self).__init__( self._package.name, self._package.version, category=category, optional=optional, allows_prereleases=True, ) @property def path(self): return self._path @property def full_path(self): return self._full_path.resolve() @property def package(self): return self._package @property def develop(self): return self._develop def is_directory(self): return True PK!h["poetry/packages/file_dependency.pyimport hashlib import io import pkginfo from pkginfo.distribution import HEADER_ATTRS from pkginfo.distribution import HEADER_ATTRS_2_0 from poetry.utils._compat import Path from .dependency import Dependency # Patching pkginfo to support Metadata version 2.1 (PEP 566) HEADER_ATTRS.update( {"2.1": HEADER_ATTRS_2_0 + (("Provides-Extra", "provides_extra", True),)} ) class FileDependency(Dependency): def __init__( self, path, # type: Path category="main", # type: str optional=False, # type: bool base=None, # type: Path ): self._path = path self._base = base self._full_path = path if self._base and not self._path.is_absolute(): self._full_path = self._base / self._path if not self._full_path.exists(): raise ValueError("File {} does not exist".format(self._path)) if self._full_path.is_dir(): raise ValueError("{} is a directory, expected a file".format(self._path)) if self._path.suffix == ".whl": self._meta = pkginfo.Wheel(str(self._full_path)) else: # Assume sdist self._meta = pkginfo.SDist(str(self._full_path)) super(FileDependency, self).__init__( self._meta.name, self._meta.version, category=category, optional=optional, allows_prereleases=True, ) @property def 
path(self): return self._path @property def full_path(self): return self._full_path.resolve() @property def metadata(self): return self._meta def is_file(self): return True def hash(self): h = hashlib.sha256() with self._full_path.open("rb") as fp: for content in iter(lambda: fp.read(io.DEFAULT_BUFFER_SIZE), b""): h.update(content) return h.hexdigest() PK!شpoetry/packages/locker.pyimport json import poetry.packages import poetry.repositories from hashlib import sha256 from typing import List from poetry.utils._compat import Path from poetry.utils.toml_file import TomlFile class Locker: _relevant_keys = ["dependencies", "dev-dependencies", "source"] def __init__(self, lock, local_config): # type: (Path, dict) -> None self._lock = TomlFile(lock) self._local_config = local_config self._lock_data = None self._content_hash = self._get_content_hash() @property def lock(self): # type: () -> TomlFile return self._lock @property def lock_data(self): if self._lock_data is None: self._lock_data = self._get_lock_data() return self._lock_data def is_locked(self): # type: () -> bool """ Checks whether the locker has been locked (lockfile found). """ if not self._lock.exists(): return False return "package" in self.lock_data def is_fresh(self): # type: () -> bool """ Checks whether the lock file is still up to date with the current hash. """ lock = self._lock.read(True) metadata = lock.get("metadata", {}) if "content-hash" in metadata: return self._content_hash == lock["metadata"]["content-hash"] return False def locked_repository( self, with_dev_reqs=False ): # type: (bool) -> poetry.repositories.Repository """ Searches and returns a repository of locked packages. 
""" if not self.is_locked(): return poetry.repositories.Repository() lock_data = self.lock_data packages = poetry.repositories.Repository() if with_dev_reqs: locked_packages = lock_data["package"] else: locked_packages = [ p for p in lock_data["package"] if p["category"] == "main" ] if not locked_packages: return packages for info in locked_packages: package = poetry.packages.Package( info["name"], info["version"], info["version"] ) package.description = info.get("description", "") package.category = info["category"] package.optional = info["optional"] package.hashes = lock_data["metadata"]["hashes"][info["name"]] package.python_versions = info["python-versions"] for dep_name, constraint in info.get("dependencies", {}).items(): package.add_dependency(dep_name, constraint) if "requirements" in info: package.requirements = info["requirements"] if "source" in info: package.source_type = info["source"]["type"] package.source_url = info["source"]["url"] package.source_reference = info["source"]["reference"] packages.add_package(package) return packages def set_lock_data(self, root, packages): # type: () -> bool hashes = {} packages = self._lock_packages(packages) # Retrieving hashes for package in packages: hashes[package["name"]] = package["hashes"] del package["hashes"] lock = { "package": packages, "metadata": { "python-versions": root.python_versions, "platform": root.platform, "content-hash": self._content_hash, "hashes": hashes, }, } if root.extras: lock["extras"] = { extra: [dep.pretty_name for dep in deps] for extra, deps in root.extras.items() } if not self.is_locked() or lock != self.lock_data: self._write_lock_data(lock) return True return False def _write_lock_data(self, data): self._lock.write(data) self._lock_data = None def _get_content_hash(self): # type: () -> str """ Returns the sha256 hash of the sorted content of the composer file. 
""" content = self._local_config relevant_content = {} for key in self._relevant_keys: relevant_content[key] = content.get(key) content_hash = sha256( json.dumps(relevant_content, sort_keys=True).encode() ).hexdigest() return content_hash def _get_lock_data(self): # type: () -> dict if not self._lock.exists(): raise RuntimeError("No lockfile found. Unable to read locked packages") return self._lock.read(True) def _lock_packages( self, packages ): # type: (List['poetry.packages.Package']) -> list locked = [] for package in sorted(packages, key=lambda x: x.name): spec = self._dump_package(package) locked.append(spec) return locked def _dump_package(self, package): # type: (poetry.packages.Package) -> dict dependencies = {} for dependency in package.requires: if dependency.is_optional() and not dependency.is_activated(): continue dependencies[dependency.pretty_name] = str(dependency.pretty_constraint) data = { "name": package.pretty_name, "version": package.pretty_version, "description": package.description, "category": package.category, "optional": package.optional, "python-versions": package.python_versions, "platform": package.platform, "hashes": package.hashes, "dependencies": dependencies, } if package.source_type: data["source"] = { "type": package.source_type, "url": package.source_url, "reference": package.source_reference, } if package.requirements: data["requirements"] = package.requirements return data PK!OA$$poetry/packages/package.py# -*- coding: utf-8 -*- import copy import re from typing import Union from poetry.semver import Version from poetry.semver import parse_constraint from poetry.spdx import license_by_id from poetry.spdx import License from poetry.utils._compat import Path from poetry.utils.helpers import canonicalize_name from .constraints.empty_constraint import EmptyConstraint from .constraints.generic_constraint import GenericConstraint from .dependency import Dependency from .directory_dependency import DirectoryDependency from 
.file_dependency import FileDependency from .vcs_dependency import VCSDependency AUTHOR_REGEX = re.compile("(?u)^(?P[- .,\w\d'’\"()]+)(?: <(?P.+?)>)?$") class Package(object): AVAILABLE_PYTHONS = {"2", "2.7", "3", "3.4", "3.5", "3.6", "3.7"} supported_link_types = { "require": {"description": "requires", "method": "requires"}, "provide": {"description": "provides", "method": "provides"}, } def __init__(self, name, version, pretty_version=None): """ Creates a new in memory package. """ self._pretty_name = name self._name = canonicalize_name(name) if not isinstance(version, Version): self._version = Version.parse(version) self._pretty_version = pretty_version or version else: self._version = version self._pretty_version = pretty_version or self._version.text self.description = "" self._authors = [] self.homepage = None self.repository_url = None self.keywords = [] self._license = None self.readme = None self.source_type = "" self.source_reference = "" self.source_url = "" self.requires = [] self.dev_requires = [] self.extras = {} self.requires_extras = [] self.category = "main" self.hashes = [] self.optional = False # Requirements for making it mandatory self.requirements = {} self.build = None self.include = [] self.exclude = [] self.classifiers = [] self._python_versions = "*" self._python_constraint = parse_constraint("*") self._platform = "*" self._platform_constraint = EmptyConstraint() self.root_dir = None self.develop = False @property def name(self): return self._name @property def pretty_name(self): return self._pretty_name @property def version(self): return self._version @property def pretty_version(self): return self._pretty_version @property def unique_name(self): if self.is_root(): return self._name return self.name + "-" + self._version.text @property def pretty_string(self): return self.pretty_name + " " + self.pretty_version @property def full_pretty_version(self): if self.source_type not in ["hg", "git"]: return self._pretty_version # if source 
reference is a sha1 hash -- truncate if len(self.source_reference) == 40: return "{} {}".format(self._pretty_version, self.source_reference[0:7]) return "{} {}".format(self._pretty_version, self.source_reference) @property def authors(self): # type: () -> list return self._authors @property def author_name(self): # type: () -> str return self._get_author()["name"] @property def author_email(self): # type: () -> str return self._get_author()["email"] @property def all_requires(self): return self.requires + self.dev_requires def _get_author(self): # type: () -> dict if not self._authors: return {"name": None, "email": None} m = AUTHOR_REGEX.match(self._authors[0]) name = m.group("name") email = m.group("email") return {"name": name, "email": email} @property def python_versions(self): return self._python_versions @python_versions.setter def python_versions(self, value): self._python_versions = value self._python_constraint = parse_constraint(value) @property def python_constraint(self): return self._python_constraint @property def platform(self): # type: () -> str return self._platform @platform.setter def platform(self, value): # type: (str) -> None self._platform = value self._platform_constraint = GenericConstraint.parse(value) @property def platform_constraint(self): return self._platform_constraint @property def license(self): return self._license @license.setter def license(self, value): if value is None: self._license = value elif isinstance(value, License): self._license = value else: self._license = license_by_id(value) @property def all_classifiers(self): classifiers = copy.copy(self.classifiers) # Automatically set python classifiers if self.python_versions == "*": python_constraint = parse_constraint("~2.7 || ^3.4") else: python_constraint = self.python_constraint for version in sorted(self.AVAILABLE_PYTHONS): if len(version) == 1: constraint = parse_constraint(version + ".*") else: constraint = Version.parse(version) if 
python_constraint.allows_any(constraint): classifiers.append( "Programming Language :: Python :: {}".format(version) ) # Automatically set license classifiers if self.license: classifiers.append(self.license.classifier) classifiers = set(classifiers) return sorted(classifiers) def is_prerelease(self): return self._version.is_prerelease() def is_root(self): return False def add_dependency( self, name, # type: str constraint=None, # type: Union[str, dict, None] category="main", # type: str ): # type: (...) -> Dependency if constraint is None: constraint = "*" if isinstance(constraint, dict): optional = constraint.get("optional", False) python_versions = constraint.get("python") platform = constraint.get("platform") allows_prereleases = constraint.get("allows-prereleases", False) if "git" in constraint: # VCS dependency dependency = VCSDependency( name, "git", constraint["git"], branch=constraint.get("branch", None), tag=constraint.get("tag", None), rev=constraint.get("rev", None), optional=optional, ) elif "file" in constraint: file_path = Path(constraint["file"]) dependency = FileDependency( file_path, category=category, base=self.root_dir ) elif "path" in constraint: path = Path(constraint["path"]) if self.root_dir: is_file = (self.root_dir / path).is_file() else: is_file = path.is_file() if is_file: dependency = FileDependency( path, category=category, optional=optional, base=self.root_dir ) else: dependency = DirectoryDependency( path, category=category, optional=optional, base=self.root_dir, develop=constraint.get("develop", False), ) else: version = constraint["version"] dependency = Dependency( name, version, optional=optional, category=category, allows_prereleases=allows_prereleases, ) if python_versions: dependency.python_versions = python_versions if platform: dependency.platform = platform if "extras" in constraint: for extra in constraint["extras"]: dependency.extras.append(extra) else: dependency = Dependency(name, constraint, category=category) if 
category == "dev": self.dev_requires.append(dependency) else: self.requires.append(dependency) return dependency def to_dependency(self): return Dependency(self.name, self._version) def __hash__(self): return hash((self._name, self._version)) def __eq__(self, other): if not isinstance(other, Package): return NotImplemented return self._name == other.name and self._version == other.version def __str__(self): return self.unique_name def __repr__(self): return "".format(self.unique_name) PK!zm"poetry/packages/project_package.pyfrom .package import Package class ProjectPackage(Package): def is_root(self): return True def to_dependency(self): dependency = super(ProjectPackage, self).to_dependency() dependency.is_root = True return dependency PK!!poetry/packages/utils/__init__.pyPK!1poetry/packages/utils/link.pyimport posixpath try: import urllib.parse as urlparse except ImportError: import urlparse import re from .utils import path_to_url from .utils import splitext class Link: def __init__(self, url, comes_from=None, requires_python=None): """ Object representing a parsed link from https://pypi.python.org/simple/* url: url of the resource pointed to (href of the link) comes_from: instance of HTMLPage where the link was found, or string. requires_python: String containing the `Requires-Python` metadata field, specified in PEP 345. This may be specified by a data-requires-python attribute in the HTML link tag, as described in PEP 503. 
""" # url can be a UNC windows share if url.startswith("\\\\"): url = path_to_url(url) self.url = url self.comes_from = comes_from self.requires_python = requires_python if requires_python else None def __str__(self): if self.requires_python: rp = " (requires-python:%s)" % self.requires_python else: rp = "" if self.comes_from: return "%s (from %s)%s" % (self.url, self.comes_from, rp) else: return str(self.url) def __repr__(self): return "" % self def __eq__(self, other): if not isinstance(other, Link): return NotImplemented return self.url == other.url def __ne__(self, other): if not isinstance(other, Link): return NotImplemented return self.url != other.url def __lt__(self, other): if not isinstance(other, Link): return NotImplemented return self.url < other.url def __le__(self, other): if not isinstance(other, Link): return NotImplemented return self.url <= other.url def __gt__(self, other): if not isinstance(other, Link): return NotImplemented return self.url > other.url def __ge__(self, other): if not isinstance(other, Link): return NotImplemented return self.url >= other.url def __hash__(self): return hash(self.url) @property def filename(self): _, netloc, path, _, _ = urlparse.urlsplit(self.url) name = posixpath.basename(path.rstrip("/")) or netloc name = urlparse.unquote(name) assert name, "URL %r produced no filename" % self.url return name @property def scheme(self): return urlparse.urlsplit(self.url)[0] @property def netloc(self): return urlparse.urlsplit(self.url)[1] @property def path(self): return urlparse.unquote(urlparse.urlsplit(self.url)[2]) def splitext(self): return splitext(posixpath.basename(self.path.rstrip("/"))) @property def ext(self): return self.splitext()[1] @property def url_without_fragment(self): scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url) return urlparse.urlunsplit((scheme, netloc, path, query, None)) _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)") @property def egg_fragment(self): match = 
self._egg_fragment_re.search(self.url) if not match: return None return match.group(1) _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)") @property def subdirectory_fragment(self): match = self._subdirectory_fragment_re.search(self.url) if not match: return None return match.group(1) _hash_re = re.compile(r"(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)") @property def hash(self): match = self._hash_re.search(self.url) if match: return match.group(2) return None @property def hash_name(self): match = self._hash_re.search(self.url) if match: return match.group(1) return None @property def show_url(self): return posixpath.basename(self.url.split("#", 1)[0].split("?", 1)[0]) @property def is_wheel(self): return self.ext == ".whl" @property def is_artifact(self): """ Determines if this points to an actual artifact (e.g. a tarball) or if it points to an "abstract" thing like a path or a VCS location. """ if self.scheme in ["ssh", "git", "hg", "bzr", "sftp", "svn"]: return False return True PK!)~ ~ poetry/packages/utils/utils.pyimport os import posixpath import re try: import urllib.parse as urlparse except ImportError: import urlparse try: import urllib.request as urllib2 except ImportError: import urllib2 BZ2_EXTENSIONS = (".tar.bz2", ".tbz") XZ_EXTENSIONS = (".tar.xz", ".txz", ".tlz", ".tar.lz", ".tar.lzma") ZIP_EXTENSIONS = (".zip", ".whl") TAR_EXTENSIONS = (".tar.gz", ".tgz", ".tar") ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS try: import bz2 # noqa SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS except ImportError: pass try: # Only for Python 3.3+ import lzma # noqa SUPPORTED_EXTENSIONS += XZ_EXTENSIONS except ImportError: pass def path_to_url(path): """ Convert a path to a file: URL. The path will be made absolute and have quoted path parts. 
""" path = os.path.normpath(os.path.abspath(path)) url = urlparse.urljoin("file:", urllib2.pathname2url(path)) return url def is_url(name): if ":" not in name: return False scheme = name.split(":", 1)[0].lower() return scheme in [ "http", "https", "file", "ftp", "ssh", "git", "hg", "bzr", "sftp", "svn" "ssh", ] def strip_extras(path): m = re.match(r"^(.+)(\[[^\]]+\])$", path) extras = None if m: path_no_extras = m.group(1) extras = m.group(2) else: path_no_extras = path return path_no_extras, extras def is_installable_dir(path): """Return True if `path` is a directory containing a setup.py file.""" if not os.path.isdir(path): return False setup_py = os.path.join(path, "setup.py") if os.path.isfile(setup_py): return True return False def is_archive_file(name): """Return True if `name` is a considered as an archive file.""" ext = splitext(name)[1].lower() if ext in ARCHIVE_EXTENSIONS: return True return False def splitext(path): """Like os.path.splitext, but take off .tar too""" base, ext = posixpath.splitext(path) if base.lower().endswith(".tar"): ext = base[-4:] + ext base = base[:-4] return base, ext def group_markers(markers): groups = [[]] for marker in markers: assert isinstance(marker, (list, tuple, str)) if isinstance(marker, list): groups[-1].append(group_markers(marker)) elif isinstance(marker, tuple): lhs, op, rhs = marker groups[-1].append((lhs.value, op, rhs.value)) else: assert marker in ["and", "or"] if marker == "or": groups.append([]) return groups def convert_markers(markers): groups = group_markers(markers) requirements = {} def _group(_groups, or_=False): for group in _groups: if isinstance(group, tuple): variable, op, value = group group_name = str(variable) if group_name not in requirements: requirements[group_name] = [[]] elif or_: requirements[group_name].append([]) or_ = False requirements[group_name][-1].append((str(op), str(value))) else: _group(group, or_=True) _group(groups) return requirements PK!ٺg!poetry/packages/vcs_dependency.pyfrom 
.dependency import Dependency class VCSDependency(Dependency): """ Represents a VCS dependency """ def __init__( self, name, vcs, source, branch=None, tag=None, rev=None, optional=False ): self._vcs = vcs self._source = source if not any([branch, tag, rev]): # If nothing has been specified, we assume master branch = "master" self._branch = branch self._tag = tag self._rev = rev super(VCSDependency, self).__init__( name, "*", optional=optional, allows_prereleases=True ) @property def vcs(self): return self._vcs @property def source(self): return self._source @property def branch(self): return self._branch @property def tag(self): return self._tag @property def rev(self): return self._rev @property def reference(self): # type: () -> str return self._branch or self._tag or self._rev @property def pretty_constraint(self): # type: () -> str if self._branch: what = "branch" version = self._branch elif self._tag: what = "tag" version = self._tag else: what = "rev" version = self._rev return "{} {}".format(what, version) def is_vcs(self): # type: () -> bool return True def accepts_prereleases(self): return True PK!poetry/poetry.pyfrom __future__ import absolute_import from __future__ import unicode_literals import json import jsonschema from .__version__ import __version__ from .config import Config from .exceptions import InvalidProjectFile from .packages import Dependency from .packages import Locker from .packages import Package from .packages import ProjectPackage from .repositories import Pool from .repositories.pypi_repository import PyPiRepository from .spdx import license_by_id from .utils._compat import Path from .utils.toml_file import TomlFile class Poetry: VERSION = __version__ def __init__( self, file, # type: Path local_config, # type: dict package, # type: Package locker, # type: Locker ): self._file = TomlFile(file) self._package = package self._local_config = local_config self._locker = locker self._config = Config.create("config.toml") # Configure sources 
self._pool = Pool() for source in self._local_config.get("source", []): self._pool.configure(source) # Always put PyPI last to prefere private repositories self._pool.add_repository( PyPiRepository( fallback=self._config.setting("settings.pypi.fallback", True) ) ) @property def file(self): return self._file @property def package(self): # type: () -> Package return self._package @property def local_config(self): # type: () -> dict return self._local_config @property def locker(self): # type: () -> Locker return self._locker @property def pool(self): # type: () -> Pool return self._pool @classmethod def create(cls, cwd): # type: (Path) -> Poetry candidates = [Path(cwd)] candidates.extend(Path(cwd).parents) for path in candidates: poetry_file = path / "pyproject.toml" if poetry_file.exists(): break else: raise RuntimeError( "Poetry could not find a pyproject.toml file in {} or its parents".format( cwd ) ) local_config = TomlFile(poetry_file.as_posix()).read(True) if "tool" not in local_config or "poetry" not in local_config["tool"]: raise RuntimeError( "[tool.poetry] section not found in {}".format(poetry_file.name) ) local_config = local_config["tool"]["poetry"] # Checking validity cls.check(local_config) # Load package name = local_config["name"] version = local_config["version"] package = ProjectPackage(name, version, version) package.root_dir = poetry_file.parent for author in local_config["authors"]: package.authors.append(author) package.description = local_config.get("description", "") package.homepage = local_config.get("homepage") package.repository_url = local_config.get("repository") package.license = local_config.get("license") package.keywords = local_config.get("keywords", []) package.classifiers = local_config.get("classifiers", []) if "readme" in local_config: package.readme = Path(poetry_file.parent) / local_config["readme"] if "platform" in local_config: package.platform = local_config["platform"] if "dependencies" in local_config: for name, 
constraint in local_config["dependencies"].items(): if name.lower() == "python": package.python_versions = constraint continue package.add_dependency(name, constraint) if "dev-dependencies" in local_config: for name, constraint in local_config["dev-dependencies"].items(): package.add_dependency(name, constraint, category="dev") extras = local_config.get("extras", {}) for extra_name, requirements in extras.items(): package.extras[extra_name] = [] # Checking for dependency for req in requirements: req = Dependency(req, "*") for dep in package.requires: if dep.name == req.name: dep.in_extras.append(extra_name) package.extras[extra_name].append(dep) break if "build" in local_config: package.build = local_config["build"] if "include" in local_config: package.include = local_config["include"] if "exclude" in local_config: package.exclude = local_config["exclude"] locker = Locker(poetry_file.with_suffix(".lock"), local_config) return cls(poetry_file, local_config, package, locker) @classmethod def check(cls, config, strict=False): # type: (dict, bool) -> bool """ Checks the validity of a configuration """ schema = Path(__file__).parent / "json" / "schemas" / "poetry-schema.json" with schema.open() as f: schema = json.loads(f.read()) try: jsonschema.validate(config, schema) except jsonschema.ValidationError as e: message = e.message if e.path: message = "[{}] {}".format(".".join(e.path), message) raise InvalidProjectFile(message) if strict: # If strict, check the file more thoroughly # Checking license license = config.get("license") if license: try: license_by_id(license) except ValueError: raise InvalidProjectFile("Invalid license") return True PK!-;}poetry/puzzle/__init__.pyfrom .solver import Solver PK!#poetry/puzzle/dependencies.pyclass Dependencies: """ Proxy to package dependencies to only require them when needed. 
""" def __init__(self, package, provider): self._package = package self._provider = provider self._dependencies = None @property def dependencies(self): if self._dependencies is None: self._dependencies = self._get_dependencies() return self._dependencies def _get_dependencies(self): self._provider.debug("Getting dependencies for {}".format(self._package), 0) dependencies = self._provider._dependencies_for(self._package) if dependencies is None: dependencies = [] return dependencies def __len__(self): return len(self.dependencies) def __iter__(self): return self.dependencies.__iter__() def __add__(self, other): return self.dependencies + other __radd__ = __add__ PK!Npoetry/puzzle/exceptions.pyclass SolverProblemError(Exception): def __init__(self, error): self._error = error super(SolverProblemError, self).__init__(str(error)) @property def error(self): return self._error PK!,MYY$poetry/puzzle/operations/__init__.pyfrom .install import Install from .uninstall import Uninstall from .update import Update PK!h9ii#poetry/puzzle/operations/install.pyfrom .operation import Operation class Install(Operation): def __init__(self, package, reason=None): super(Install, self).__init__(reason) self._package = package @property def package(self): return self._package @property def job_type(self): return "install" def __str__(self): return "Installing {} ({})".format( self.package.pretty_name, self.format_version(self.package) ) def __repr__(self): return "".format( self.package.pretty_name, self.format_version(self.package) ) PK!ť%poetry/puzzle/operations/operation.py# -*- coding: utf-8 -*- from typing import Union class Operation(object): def __init__(self, reason=None): # type: (Union[str, None]) -> None self._reason = reason self._skipped = False self._skip_reason = None @property def job_type(self): # type: () -> str raise NotImplementedError @property def reason(self): # type: () -> str return self._reason @property def skipped(self): # type: () -> bool return self._skipped 
@property def skip_reason(self): # type: () -> Union[str, None] return self._skip_reason def format_version(self, package): # type: (...) -> str return package.full_pretty_version def skip(self, reason): # type: (str) -> Operation self._skipped = True self._skip_reason = reason return self def unskip(self): # type: () -> Operation self._skipped = False self._skip_reason = None return self PK!^Ntt%poetry/puzzle/operations/uninstall.pyfrom .operation import Operation class Uninstall(Operation): def __init__(self, package, reason=None): super(Uninstall, self).__init__(reason) self._package = package @property def package(self): return self._package @property def job_type(self): return "uninstall" def __str__(self): return "Uninstalling {} ({})".format( self.package.pretty_name, self.format_version(self._package) ) def __repr__(self): return "".format( self.package.pretty_name, self.format_version(self.package) ) PK!Y%{ZZ"poetry/puzzle/operations/update.pyfrom .operation import Operation class Update(Operation): def __init__(self, initial, target, reason=None): self._initial_package = initial self._target_package = target super(Update, self).__init__(reason) @property def initial_package(self): return self._initial_package @property def target_package(self): return self._target_package @property def package(self): return self._target_package @property def job_type(self): return "update" def __str__(self): return "Updating {} ({}) to {} ({})".format( self.initial_package.pretty_name, self.format_version(self.initial_package), self.target_package.pretty_name, self.format_version(self.target_package), ) def __repr__(self): return "".format( self.initial_package.pretty_name, self.format_version(self.initial_package), self.target_package.pretty_name, self.format_version(self.target_package), ) PK![11poetry/puzzle/provider.pyimport os import pkginfo import shutil import time from cleo import ProgressIndicator from contextlib import contextmanager from functools import 
cmp_to_key from tempfile import mkdtemp from typing import List from typing import Union from poetry.packages import Dependency from poetry.packages import DirectoryDependency from poetry.packages import FileDependency from poetry.packages import Package from poetry.packages import VCSDependency from poetry.packages import dependency_from_pep_508 from poetry.mixology.incompatibility import Incompatibility from poetry.mixology.incompatibility_cause import DependencyCause from poetry.mixology.incompatibility_cause import PlatformCause from poetry.mixology.incompatibility_cause import PythonCause from poetry.mixology.term import Term from poetry.repositories import Pool from poetry.utils._compat import Path from poetry.utils.helpers import parse_requires from poetry.utils.toml_file import TomlFile from poetry.utils.venv import Venv from poetry.vcs.git import Git from .dependencies import Dependencies class Indicator(ProgressIndicator): def __init__(self, output): super(Indicator, self).__init__(output) self.format = "%message% (%elapsed:2s%)" @contextmanager def auto(self): message = "Resolving dependencies..." 
with super(Indicator, self).auto(message, message): yield def _formatter_elapsed(self): elapsed = time.time() - self.start_time return "{:.1f}s".format(elapsed) class Provider: UNSAFE_PACKAGES = {"setuptools", "distribute", "pip"} def __init__(self, package, pool, io): # type: Package # type: Pool self._package = package self._pool = pool self._io = io self._python_constraint = package.python_constraint self._search_for = {} self._is_debugging = self._io.is_debug() or self._io.is_very_verbose() @property def pool(self): # type: () -> Pool return self._pool @property def name_for_explicit_dependency_source(self): # type: () -> str return "pyproject.toml" @property def name_for_locking_dependency_source(self): # type: () -> str return "pyproject.lock" def is_debugging(self): return self._is_debugging def name_for(self, dependency): # type: (Dependency) -> str """ Returns the name for the given dependency. """ return dependency.name def search_for(self, dependency): # type: (Dependency) -> List[Package] """ Search for the specifications that match the given dependency. The specifications in the returned list will be considered in reverse order, so the latest version ought to be last. 
""" if dependency.is_root: return [self._package] if dependency in self._search_for: return self._search_for[dependency] if dependency.is_vcs(): packages = self.search_for_vcs(dependency) elif dependency.is_file(): packages = self.search_for_file(dependency) elif dependency.is_directory(): packages = self.search_for_directory(dependency) else: constraint = dependency.constraint packages = self._pool.find_packages( dependency.name, constraint, extras=dependency.extras, allow_prereleases=dependency.allows_prereleases(), ) packages.sort( key=lambda p: ( not p.is_prerelease() and not dependency.allows_prereleases(), p.version, ), reverse=True, ) self._search_for[dependency] = packages return self._search_for[dependency] def search_for_vcs(self, dependency): # type: (VCSDependency) -> List[Package] """ Search for the specifications that match the given VCS dependency. Basically, we clone the repository in a temporary directory and get the information we need by checking out the specified reference. 
""" if dependency.vcs != "git": raise ValueError("Unsupported VCS dependency {}".format(dependency.vcs)) tmp_dir = Path(mkdtemp(prefix="pypoetry-git-{}".format(dependency.name))) try: git = Git() git.clone(dependency.source, tmp_dir) git.checkout(dependency.reference, tmp_dir) revision = git.rev_parse(dependency.reference, tmp_dir).strip() if dependency.tag or dependency.rev: revision = dependency.reference pyproject = TomlFile(tmp_dir / "pyproject.toml") pyproject_content = None has_poetry = False if pyproject.exists(): pyproject_content = pyproject.read(True) has_poetry = ( "tool" in pyproject_content and "poetry" in pyproject_content["tool"] ) if pyproject_content and has_poetry: # If a pyproject.toml file exists # We use it to get the information we need info = pyproject_content["tool"]["poetry"] name = info["name"] version = info["version"] package = Package(name, version, version) package.source_type = dependency.vcs package.source_url = dependency.source package.source_reference = dependency.reference for req_name, req_constraint in info["dependencies"].items(): if req_name == "python": package.python_versions = req_constraint continue package.add_dependency(req_name, req_constraint) else: # We need to use setup.py here # to figure the information we need # We need to place ourselves in the proper # folder for it to work venv = Venv.create(self._io) current_dir = os.getcwd() os.chdir(tmp_dir.as_posix()) try: venv.run("python", "setup.py", "egg_info") egg_info = list(tmp_dir.glob("*.egg-info"))[0] meta = pkginfo.UnpackedSDist(str(egg_info)) if meta.requires_dist: reqs = list(meta.requires_dist) else: reqs = [] requires = egg_info / "requires.txt" if requires.exists(): with requires.open() as f: reqs = parse_requires(f.read()) package = Package(meta.name, meta.version) for req in reqs: package.requires.append(dependency_from_pep_508(req)) except Exception: raise finally: os.chdir(current_dir) package.source_type = "git" package.source_url = dependency.source 
package.source_reference = revision except Exception: raise finally: shutil.rmtree(tmp_dir.as_posix()) return [package] def search_for_file(self, dependency): # type: (FileDependency) -> List[Package] package = Package(dependency.name, dependency.pretty_constraint) package.source_type = "file" package.source_url = str(dependency.path) package.description = dependency.metadata.summary for req in dependency.metadata.requires_dist: package.requires.append(dependency_from_pep_508(req)) if dependency.metadata.requires_python: package.python_versions = dependency.metadata.requires_python if dependency.metadata.platforms: package.platform = " || ".join(dependency.metadata.platforms) package.hashes = [dependency.hash()] return [package] def search_for_directory( self, dependency ): # type: (DirectoryDependency) -> List[Package] package = dependency.package if dependency.extras: for extra in dependency.extras: if extra in package.extras: for dep in package.extras[extra]: dep.activate() return [package] def incompatibilities_for( self, package ): # type: (Package) -> List[Incompatibility] """ Returns incompatibilities that encapsulate a given package's dependencies, or that it can't be safely selected. If multiple subsequent versions of this package have the same dependencies, this will return incompatibilities that reflect that. It won't return incompatibilities that have already been returned by a previous call to _incompatibilities_for(). 
""" if package.source_type in ["git", "file", "directory"]: dependencies = package.requires elif package.is_root(): dependencies = package.all_requires else: dependencies = self._dependencies_for(package) if not self._package.python_constraint.allows_any(package.python_constraint): return [ Incompatibility( [Term(package.to_dependency(), True)], PythonCause(package.python_versions), ) ] if not self._package.platform_constraint.matches(package.platform_constraint): return [ Incompatibility( [Term(package.to_dependency(), True)], PlatformCause(package.platform), ) ] return [ Incompatibility( [Term(package.to_dependency(), True), Term(dep, False)], DependencyCause(), ) for dep in dependencies ] def dependencies_for( self, package ): # type: (Package) -> Union[List[Dependency], Dependencies] if package.source_type in ["git", "file", "directory"]: # Information should already be set return [ r for r in package.requires if not r.is_activated() and r.name not in self.UNSAFE_PACKAGES ] else: return Dependencies(package, self) def _dependencies_for(self, package): # type: (Package) -> List[Dependency] complete_package = self._pool.package( package.name, package.version.text, extras=package.requires_extras ) # Update package with new information package.requires = complete_package.requires package.description = complete_package.description package.python_versions = complete_package.python_versions package.platform = complete_package.platform package.hashes = complete_package.hashes package.extras = complete_package.extras return [ r for r in package.requires if r.is_activated() and self._package.python_constraint.allows_any(r.python_constraint) and self._package.platform_constraint.matches(package.platform_constraint) and r.name not in self.UNSAFE_PACKAGES ] # UI @property def output(self): return self._io def before_resolution(self): self._io.write("Resolving dependencies") if self.is_debugging(): self._io.new_line() def indicate_progress(self): if not self.is_debugging(): 
self._io.write(".") def after_resolution(self): self._io.new_line() def debug(self, message, depth=0): if self.is_debugging(): debug_info = str(message) debug_info = ( "\n".join( [ "{}: {}".format(str(depth).rjust(4), s) for s in debug_info.split("\n") ] ) + "\n" ) self.output.write(debug_info) @contextmanager def progress(self): if not self._io.is_decorated() or self.is_debugging(): self.output.writeln("Resolving dependencies...") yield else: indicator = Indicator(self._io) with indicator.auto(): yield PK!8kW  poetry/puzzle/solver.pyfrom typing import List from poetry.mixology import resolve_version from poetry.mixology.failure import SolveFailure from poetry.packages.constraints.generic_constraint import GenericConstraint from poetry.semver import parse_constraint from .exceptions import SolverProblemError from .operations import Install from .operations import Uninstall from .operations import Update from .operations.operation import Operation from .provider import Provider class Solver: def __init__(self, package, pool, installed, locked, io): self._package = package self._pool = pool self._installed = installed self._locked = locked self._io = io def solve(self, use_latest=None): # type: (...) 
-> List[Operation] provider = Provider(self._package, self._pool, self._io) locked = {} for package in self._locked.packages: locked[package.name] = package try: result = resolve_version( self._package, provider, locked=locked, use_latest=use_latest ) except SolveFailure as e: raise SolverProblemError(e) packages = result.packages requested = self._package.all_requires for package in packages: graph = self._build_graph(self._package, packages) category, optional, python, platform = self._get_tags_for_package( package, graph ) package.category = category package.optional = optional # If requirements are empty, drop them requirements = {} if python is not None and python != "*": requirements["python"] = python if platform is not None and platform != "*": requirements["platform"] = platform package.requirements = requirements operations = [] for package in packages: installed = False for pkg in self._installed.packages: if package.name == pkg.name: installed = True # Checking version if package.version != pkg.version: operations.append(Update(pkg, package)) else: operations.append(Install(package).skip("Already installed")) break if not installed: operations.append(Install(package)) # Checking for removals for pkg in self._locked.packages: remove = True for package in packages: if pkg.name == package.name: remove = False break if remove: skip = True for installed in self._installed.packages: if installed.name == pkg.name: skip = False break op = Uninstall(pkg) if skip: op.skip("Not currently installed") operations.append(op) requested_names = [r.name for r in self._package.all_requires] return sorted( operations, key=lambda o: ( 1 if o.package.name in requested_names else 0, o.package.name, ), ) def _build_graph(self, package, packages, previous=None, dep=None): if not previous: category = "dev" optional = True python_version = None platform = None else: category = dep.category optional = dep.is_optional() and not dep.is_activated() python_version = ( 
dep.python_versions if previous.python_constraint.allows_all(dep.python_constraint) else previous.python_versions ) platform = ( dep.platform if previous.platform_constraint.matches(dep.platform_constraint) and dep.platform != "*" else previous.platform ) graph = { "name": package.name, "category": category, "optional": optional, "python_version": python_version, "platform": platform, "children": [], } if previous and previous is not dep and previous.name == dep.name: return graph for dependency in package.all_requires: if dependency.is_optional(): if not package.is_root() and (not dep or not dep.extras): continue is_activated = False for group, extras in package.extras.items(): if dep: extras = dep.extras elif package.is_root(): extras = package.extras else: extras = [] if group in extras: is_activated = True break if not is_activated: continue for pkg in packages: if pkg.name == dependency.name: graph["children"].append( self._build_graph(pkg, packages, dependency, dep or dependency) ) return graph def _get_tags_for_package(self, package, graph): categories = ["dev"] optionals = [True] python_versions = [] platforms = [] children = graph["children"] for child in children: if child["name"] == package.name: category = child["category"] optional = child["optional"] python_version = child["python_version"] platform = child["platform"] else: ( category, optional, python_version, platform, ) = self._get_tags_for_package(package, child) categories.append(category) optionals.append(optional) if python_version is not None: python_versions.append(python_version) if platform is not None: platforms.append(platform) if "main" in categories: category = "main" else: category = "dev" optional = all(optionals) if not python_versions: python_version = None else: # Find the least restrictive constraint python_version = python_versions[0] for constraint in python_versions[1:]: previous = parse_constraint(python_version) current = parse_constraint(constraint) if python_version == 
"*": continue elif constraint == "*": python_version = constraint elif current.allows_all(previous): python_version = constraint if not platforms: platform = None else: platform = platforms[0] for constraint in platforms[1:]: previous = GenericConstraint.parse(platform) current = GenericConstraint.parse(constraint) if platform == "*": continue elif constraint == "*": platform = constraint elif current.matches(previous): platform = constraint return category, optional, python_version, platform PK!y::poetry/repositories/__init__.pyfrom .pool import Pool from .repository import Repository PK! uKK&poetry/repositories/base_repository.pyclass BaseRepository(object): SEARCH_FULLTEXT = 0 SEARCH_NAME = 1 def __init__(self): self._packages = [] @property def packages(self): return self._packages def has_package(self, package): raise NotImplementedError() def package(self, name, version, extras=None): raise NotImplementedError() def find_packages( self, name, constraint=None, extras=None, allow_prereleases=False ): raise NotImplementedError() def search(self, query, mode=SEARCH_FULLTEXT): raise NotImplementedError() PK!Z nn+poetry/repositories/installed_repository.pyfrom poetry.packages import Package from poetry.utils.venv import Venv from .repository import Repository class InstalledRepository(Repository): @classmethod def load(cls, venv): # type: (Venv) -> InstalledRepository """ Load installed packages. For now, it uses the pip "freeze" command. """ repo = cls() freeze_output = venv.run("pip", "freeze") for line in freeze_output.split("\n"): if "==" in line: name, version = line.split("==") repo.add_package(Package(name, version, version)) return repo PK!' 
VE/(/((poetry/repositories/legacy_repository.pyimport cgi import re try: import urllib.parse as urlparse except ImportError: import urlparse try: from html import unescape except ImportError: try: from html.parser import HTMLParser except ImportError: from HTMLParser import HTMLParser unescape = HTMLParser().unescape from typing import Generator from typing import Union import html5lib import requests from cachecontrol import CacheControl from cachecontrol.caches.file_cache import FileCache from cachy import CacheManager import poetry.packages from poetry.locations import CACHE_DIR from poetry.masonry.publishing.uploader import wheel_file_re from poetry.packages import Package from poetry.packages import dependency_from_pep_508 from poetry.packages.utils.link import Link from poetry.semver import parse_constraint from poetry.semver import Version from poetry.semver import VersionConstraint from poetry.utils._compat import Path from poetry.utils.helpers import canonicalize_name from poetry.version.markers import InvalidMarker from .pypi_repository import PyPiRepository class Page: VERSION_REGEX = re.compile("(?i)([a-z0-9_\-.]+?)-(?=\d)([a-z0-9_.!+-]+)") def __init__(self, url, content, headers): if not url.endswith("/"): url += "/" self._url = url encoding = None if headers and "Content-Type" in headers: content_type, params = cgi.parse_header(headers["Content-Type"]) if "charset" in params: encoding = params["charset"] self._content = content if encoding is None: self._parsed = html5lib.parse(content, namespaceHTMLElements=False) else: self._parsed = html5lib.parse( content, transport_encoding=encoding, namespaceHTMLElements=False ) @property def versions(self): # type: () -> Generator[Version] seen = set() for link in self.links: version = self.link_version(link) if not version: continue if version in seen: continue seen.add(version) yield version @property def links(self): # type: () -> Generator[Link] for anchor in self._parsed.findall(".//a"): if 
anchor.get("href"): href = anchor.get("href") url = self.clean_link(urlparse.urljoin(self._url, href)) pyrequire = anchor.get("data-requires-python") pyrequire = unescape(pyrequire) if pyrequire else None yield Link(url, self, requires_python=pyrequire) def links_for_version(self, version): # type: (Version) -> Generator[Link] for link in self.links: if self.link_version(link) == version: yield link def link_version(self, link): # type: (Link) -> Union[Version, None] m = wheel_file_re.match(link.filename) if m: version = m.group("ver") else: info, ext = link.splitext() match = self.VERSION_REGEX.match(info) if not match: return version = match.group(2) try: version = Version.parse(version) except ValueError: return return version _clean_re = re.compile(r"[^a-z0-9$&+,/:;=?@.#%_\\|-]", re.I) def clean_link(self, url): """Makes sure a link is fully encoded. That is, if a ' ' shows up in the link, it will be rewritten to %20 (while not over-quoting % or other characters).""" return self._clean_re.sub(lambda match: "%%%2x" % ord(match.group(0)), url) class LegacyRepository(PyPiRepository): def __init__(self, name, url, disable_cache=False): if name == "pypi": raise ValueError("The name [pypi] is reserved for repositories") self._packages = [] self._name = name self._url = url.rstrip("/") self._cache_dir = Path(CACHE_DIR) / "cache" / "repositories" / name self._cache = CacheManager( { "default": "releases", "serializer": "json", "stores": { "releases": {"driver": "file", "path": str(self._cache_dir)}, "packages": {"driver": "dict"}, "matches": {"driver": "dict"}, }, } ) self._session = CacheControl( requests.session(), cache=FileCache(str(self._cache_dir / "_http")) ) self._disable_cache = disable_cache @property def name(self): return self._name def find_packages( self, name, constraint=None, extras=None, allow_prereleases=False ): packages = [] if constraint is not None and not isinstance(constraint, VersionConstraint): constraint = parse_constraint(constraint) key = 
name if constraint: key = "{}:{}".format(key, str(constraint)) if self._cache.store("matches").has(key): versions = self._cache.store("matches").get(key) else: page = self._get("/{}".format(canonicalize_name(name).replace(".", "-"))) if page is None: raise ValueError('No package named "{}"'.format(name)) versions = [] for version in page.versions: if not constraint or (constraint and constraint.allows(version)): versions.append(version) self._cache.store("matches").put(key, versions, 5) for version in versions: package = Package(name, version) if extras is not None: package.requires_extras = extras packages.append(package) return packages def package( self, name, version, extras=None ): # type: (...) -> poetry.packages.Package """ Retrieve the release information. This is a heavy task which takes time. We have to download a package to get the dependencies. We also need to download every file matching this release to get the various hashes. Note that, this will be cached so the subsequent operations should be much faster. 
""" try: index = self._packages.index( poetry.packages.Package(name, version, version) ) return self._packages[index] except ValueError: if extras is None: extras = [] release_info = self.get_release_info(name, version) package = poetry.packages.Package(name, version, version) requires_dist = release_info["requires_dist"] or [] for req in requires_dist: try: dependency = dependency_from_pep_508(req) except InvalidMarker: # Invalid marker # We strip the markers hoping for the best req = req.split(";")[0] dependency = dependency_from_pep_508(req) if dependency.extras: for extra in dependency.extras: if extra not in package.extras: package.extras[extra] = [] package.extras[extra].append(dependency) if not dependency.is_optional(): package.requires.append(dependency) # Adding description package.description = release_info.get("summary", "") # Adding hashes information package.hashes = release_info["digests"] # Activate extra dependencies for extra in extras: if extra in package.extras: for dep in package.extras[extra]: dep.activate() package.requires += package.extras[extra] self._packages.append(package) return package def _get_release_info(self, name, version): # type: (str, str) -> dict page = self._get("/{}".format(canonicalize_name(name).replace(".", "-"))) if page is None: raise ValueError('No package named "{}"'.format(name)) data = { "name": name, "version": version, "summary": "", "requires_dist": [], "requires_python": [], "digests": [], } links = list(page.links_for_version(Version.parse(version))) urls = {} hashes = [] default_link = links[0] for link in links: if link.is_wheel: urls["bdist_wheel"] = link.url elif link.filename.endswith(".tar.gz"): urls["sdist"] = link.url elif link.filename.endswith((".zip", ".bz2")) and "sdist" not in urls: urls["sdist"] = link.url hash = link.hash if link.hash_name == "sha256": hashes.append(hash) data["digests"] = hashes if not urls: if default_link.is_wheel: m = wheel_file_re.match(default_link.filename) python = 
m.group("pyver") platform = m.group("plat") if python == "py2.py3" and platform == "any": urls["bdist_wheel"] = default_link.url elif default_link.filename.endswith(".tar.gz"): urls["sdist"] = default_link.url elif ( default_link.filename.endswith((".zip", ".bz2")) and "sdist" not in urls ): urls["sdist"] = default_link.url else: return data info = self._get_info_from_urls(urls) data["summary"] = info["summary"] data["requires_dist"] = info["requires_dist"] data["requires_python"] = info["requires_python"] return data def _get(self, endpoint): # type: (str) -> Union[Page, None] url = self._url + endpoint response = self._session.get(url) if response.status_code == 404: return return Page(url, response.content, response.headers) PK!41jSs s poetry/repositories/pool.pyfrom typing import List from typing import Union import poetry.packages from .base_repository import BaseRepository from .repository import Repository class Pool(BaseRepository): def __init__(self, repositories=None): # type: (Union[list, None]) -> None if repositories is None: repositories = [] self._repositories = [] for repository in repositories: self.add_repository(repository) super(Pool, self).__init__() @property def repositories(self): # type: () -> List[Repository] return self._repositories def add_repository(self, repository): # type: (Repository) -> Pool """ Adds a repository to the pool. """ self._repositories.append(repository) return self def remove_repository(self, repository_name): # type: (str) -> Pool for i, repository in enumerate(self._repositories): if repository.name == repository_name: del self._repositories[i] break return self def configure(self, source): # type: (dict) -> Pool """ Configures a repository based on a source specification and add it to the pool. 
""" from .legacy_repository import LegacyRepository if "url" in source: # PyPI-like repository if "name" not in source: raise RuntimeError("Missing [name] in source.") repository = LegacyRepository(source["name"], source["url"]) else: raise RuntimeError("Unsupported source specified") return self.add_repository(repository) def has_package(self, package): raise NotImplementedError() def package(self, name, version, extras=None): package = poetry.packages.Package(name, version, version) if package in self._packages: return self._packages[self._packages.index(package)] for repository in self._repositories: package = repository.package(name, version, extras=extras) if package: self._packages.append(package) return package return None def find_packages( self, name, constraint=None, extras=None, allow_prereleases=False ): for repository in self._repositories: packages = repository.find_packages( name, constraint, extras=extras, allow_prereleases=allow_prereleases ) if packages: return packages return [] def search(self, query, mode=BaseRepository.SEARCH_FULLTEXT): from .legacy_repository import LegacyRepository results = [] for repository in self._repositories: if isinstance(repository, LegacyRepository): continue results += repository.search(query, mode=mode) return results PK! 
m7>7>&poetry/repositories/pypi_repository.pyimport logging import os import tarfile import zipfile import pkginfo from bz2 import BZ2File from gzip import GzipFile from typing import Dict from typing import List from typing import Union try: import urllib.parse as urlparse except ImportError: import urlparse try: from xmlrpc.client import ServerProxy except ImportError: from xmlrpclib import ServerProxy from cachecontrol import CacheControl from cachecontrol.caches.file_cache import FileCache from cachy import CacheManager from requests import get from requests import session from poetry.io import NullIO from poetry.locations import CACHE_DIR from poetry.packages import dependency_from_pep_508 from poetry.packages import Package from poetry.semver import parse_constraint from poetry.semver import VersionConstraint from poetry.utils._compat import Path from poetry.utils._compat import to_str from poetry.utils.helpers import parse_requires from poetry.utils.helpers import temporary_directory from poetry.utils.venv import Venv from poetry.version.markers import InvalidMarker from .repository import Repository logger = logging.getLogger(__name__) class PyPiRepository(Repository): def __init__(self, url="https://pypi.org/", disable_cache=False, fallback=True): self._name = "PyPI" self._url = url self._disable_cache = disable_cache self._fallback = fallback release_cache_dir = Path(CACHE_DIR) / "cache" / "repositories" / "pypi" self._cache = CacheManager( { "default": "releases", "serializer": "json", "stores": { "releases": {"driver": "file", "path": str(release_cache_dir)}, "packages": {"driver": "dict"}, }, } ) self._session = CacheControl( session(), cache=FileCache(str(release_cache_dir / "_http")) ) super(PyPiRepository, self).__init__() def find_packages( self, name, # type: str constraint=None, # type: Union[VersionConstraint, str, None] extras=None, # type: Union[list, None] allow_prereleases=False, # type: bool ): # type: (...) 
-> List[Package] """ Find packages on the remote server. """ if constraint is None: constraint = "*" if not isinstance(constraint, VersionConstraint): constraint = parse_constraint(constraint) info = self.get_package_info(name) packages = [] for version, release in info["releases"].items(): if not release: # Bad release self._log( "No release information found for {}-{}, skipping".format( name, version ), level="debug", ) continue package = Package(name, version) if ( package.is_prerelease() and not allow_prereleases and not constraint.allows(package.version) ): continue if not constraint or (constraint and constraint.allows(package.version)): if extras is not None: package.requires_extras = extras packages.append(package) self._log( "{} packages found for {} {}".format(len(packages), name, str(constraint)), level="debug", ) return packages def package( self, name, # type: str version, # type: str extras=None, # type: (Union[list, None]) ): # type: (...) -> Union[Package, None] try: index = self._packages.index(Package(name, version, version)) return self._packages[index] except ValueError: if extras is None: extras = [] release_info = self.get_release_info(name, version) package = Package(name, version, version) requires_dist = release_info["requires_dist"] or [] for req in requires_dist: try: dependency = dependency_from_pep_508(req) except InvalidMarker: # Invalid marker # We strip the markers hoping for the best req = req.split(";")[0] dependency = dependency_from_pep_508(req) except ValueError: # Likely unable to parse constraint so we skip it self._log( "Invalid constraint ({}) found in {}-{} dependencies, " "skipping".format(req, package.name, package.version), level="debug", ) continue if dependency.extras: for extra in dependency.extras: if extra not in package.extras: package.extras[extra] = [] package.extras[extra].append(dependency) if not dependency.is_optional(): package.requires.append(dependency) # Adding description package.description = 
release_info.get("summary", "") if release_info["requires_python"]: package.python_versions = release_info["requires_python"] if release_info["platform"]: package.platform = release_info["platform"] # Adding hashes information package.hashes = release_info["digests"] # Activate extra dependencies for extra in extras: if extra in package.extras: for dep in package.extras[extra]: dep.activate() package.requires += package.extras[extra] self._packages.append(package) return package def search(self, query, mode=0): results = [] search = {"name": query} if mode == self.SEARCH_FULLTEXT: search["summary"] = query client = ServerProxy("https://pypi.python.org/pypi") hits = client.search(search, "or") for hit in hits: result = Package(hit["name"], hit["version"], hit["version"]) result.description = to_str(hit["summary"]) results.append(result) return results def get_package_info(self, name): # type: (str) -> dict """ Return the package information given its name. The information is returned from the cache if it exists or retrieved from the remote server. """ if self._disable_cache: return self._get_package_info(name) return self._cache.store("packages").remember_forever( name, lambda: self._get_package_info(name) ) def _get_package_info(self, name): # type: (str) -> dict data = self._get("pypi/{}/json".format(name)) if data is None: raise ValueError("Package [{}] not found.".format(name)) return data def get_release_info(self, name, version): # type: (str, str) -> dict """ Return the release information given a package name and a version. The information is returned from the cache if it exists or retrieved from the remote server. 
""" if self._disable_cache: return self._get_release_info(name, version) return self._cache.remember_forever( "{}:{}".format(name, version), lambda: self._get_release_info(name, version) ) def _get_release_info(self, name, version): # type: (str, str) -> dict self._log("Getting info for {} ({}) from PyPI".format(name, version), "debug") json_data = self._get("pypi/{}/{}/json".format(name, version)) if json_data is None: raise ValueError("Package [{}] not found.".format(name)) info = json_data["info"] data = { "name": info["name"], "version": info["version"], "summary": info["summary"], "platform": info["platform"], "requires_dist": info["requires_dist"], "requires_python": info["requires_python"], "digests": [], "_fallback": False, } try: version_info = json_data["releases"][version] except KeyError: version_info = [] for file_info in version_info: data["digests"].append(file_info["digests"]["sha256"]) if self._fallback and data["requires_dist"] is None: self._log("No dependencies found, downloading archives", level="debug") # No dependencies set (along with other information) # This might be due to actually no dependencies # or badly set metadata when uploading # So, we need to make sure there is actually no # dependencies by introspecting packages urls = {} for url in json_data["urls"]: # Only get sdist and universal wheels dist_type = url["packagetype"] if dist_type not in ["sdist", "bdist_wheel"]: continue if dist_type == "sdist" and "dist" not in urls: urls[url["packagetype"]] = url["url"] continue if "bdist_wheel" in urls: continue # If bdist_wheel, check if it's universal python_version = url["python_version"] if python_version not in ["py2.py3", "py3", "py2"]: continue parts = urlparse.urlparse(url["url"]) filename = os.path.basename(parts.path) if "-none-any" not in filename: continue if not urls: return data info = self._get_info_from_urls(urls) data["requires_dist"] = info["requires_dist"] if not data["requires_python"]: data["requires_python"] = 
info["requires_python"] return data def _get(self, endpoint): # type: (str) -> Union[dict, None] json_response = self._session.get(self._url + endpoint) if json_response.status_code == 404: return None json_data = json_response.json() return json_data def _get_info_from_urls( self, urls ): # type: (Dict[str, str]) -> Dict[str, Union[str, List, None]] if "bdist_wheel" in urls: return self._get_info_from_wheel(urls["bdist_wheel"]) return self._get_info_from_sdist(urls["sdist"]) def _get_info_from_wheel( self, url ): # type: (str) -> Dict[str, Union[str, List, None]] info = {"summary": "", "requires_python": None, "requires_dist": None} filename = os.path.basename(urlparse.urlparse(url).path) with temporary_directory() as temp_dir: filepath = os.path.join(temp_dir, filename) self._download(url, filepath) try: meta = pkginfo.Wheel(filepath) except ValueError: # Unable to determine dependencies # Assume none return info if meta.summary: info["summary"] = meta.summary or "" info["requires_python"] = meta.requires_python if meta.requires_dist: info["requires_dist"] = meta.requires_dist return info def _get_info_from_sdist( self, url ): # type: (str) -> Dict[str, Union[str, List, None]] info = {"summary": "", "requires_python": None, "requires_dist": None} filename = os.path.basename(urlparse.urlparse(url).path) with temporary_directory() as temp_dir: filepath = Path(temp_dir) / filename self._download(url, str(filepath)) try: meta = pkginfo.SDist(str(filepath)) if meta.summary: info["summary"] = meta.summary if meta.requires_python: info["requires_python"] = meta.requires_python if meta.requires_dist: info["requires_dist"] = list(meta.requires_dist) return info except ValueError: # Unable to determine dependencies # We pass and go deeper pass # Still not dependencies found # So, we unpack and introspect suffix = filepath.suffix gz = None if suffix == ".zip": tar = zipfile.ZipFile(str(filepath)) else: if suffix == ".bz2": gz = BZ2File(str(filepath)) else: gz = 
GzipFile(str(filepath)) tar = tarfile.TarFile(str(filepath), fileobj=gz) try: tar.extractall(os.path.join(temp_dir, "unpacked")) finally: if gz: gz.close() tar.close() unpacked = Path(temp_dir) / "unpacked" sdist_dir = unpacked / Path(filename).name.rstrip(".tar.gz") # Checking for .egg-info at root eggs = list(sdist_dir.glob("*.egg-info")) if eggs: egg_info = eggs[0] requires = egg_info / "requires.txt" if requires.exists(): with requires.open() as f: info["requires_dist"] = parse_requires(f.read()) return info # Searching for .egg-info in sub directories eggs = list(sdist_dir.glob("**/*.egg-info")) if eggs: egg_info = eggs[0] requires = egg_info / "requires.txt" if requires.exists(): with requires.open() as f: info["requires_dist"] = parse_requires(f.read()) return info # Still nothing, assume no dependencies # We could probably get them by executing # python setup.py egg-info but I don't feel # confortable executing a file just for the sake # of getting dependencies. return info def _inspect_sdist_with_setup(self, sdist_dir): info = {"requires_python": None, "requires_dist": None} setup = sdist_dir / "setup.py" if not setup.exists(): return info venv = Venv.create(NullIO()) current_dir = os.getcwd() os.chdir(sdist_dir.as_posix()) try: venv.run("python", "setup.py", "egg_info") egg_info = list(sdist_dir.glob("**/*.egg-info"))[0] meta = pkginfo.UnpackedSDist(str(egg_info)) if meta.requires_python: info["requires_python"] = meta.requires_python if meta.requires_dist: info["requires_dist"] = list(meta.requires_dist) else: requires = egg_info / "requires.txt" if requires.exists(): with requires.open() as f: info["requires_dist"] = parse_requires(f.read()) except Exception: pass os.chdir(current_dir) return info def _download(self, url, dest): # type: (str, str) -> None r = get(url, stream=True) with open(dest, "wb") as f: for chunk in r.iter_content(chunk_size=1024): if chunk: f.write(chunk) def _log(self, msg, level="info"): getattr(logger, level)("{}: 
{}".format(self._name, msg)) PK!-N N !poetry/repositories/repository.pyfrom poetry.semver import parse_constraint from poetry.semver import VersionConstraint from .base_repository import BaseRepository class Repository(BaseRepository): def __init__(self, packages=None): super(Repository, self).__init__() if packages is None: packages = [] for package in packages: self.add_package(package) def package(self, name, version, extras=None): name = name.lower() if extras is None: extras = [] for package in self.packages: if name == package.name and package.version.text == version: # Activate extra dependencies for extra in extras: if extra in package.extras: for extra_dep in package.extras[extra]: for dep in package.requires: if dep.name == extra_dep.lower(): dep.activate() return package def find_packages( self, name, constraint=None, extras=None, allow_prereleases=False ): name = name.lower() packages = [] if extras is None: extras = [] if constraint is None: constraint = "*" if not isinstance(constraint, VersionConstraint): constraint = parse_constraint(constraint) for package in self.packages: if name == package.name: if ( package.is_prerelease() and not allow_prereleases and not constraint.allows(package.version) ): continue if constraint is None or constraint.allows(package.version): for dep in package.requires: for extra in extras: if extra not in package.extras: continue reqs = package.extras[extra] for req in reqs: if req.name == dep.name: dep.activate() packages.append(package) return packages def has_package(self, package): package_id = package.unique_name for repo_package in self.packages: if package_id == repo_package.unique_name: return True return False def add_package(self, package): self._packages.append(package) def remove_package(self, package): package_id = package.unique_name index = None for i, repo_package in enumerate(self.packages): if package_id == repo_package.unique_name: index = i break if index is not None: del self._packages[index] def 
search(self, query, mode=0): results = [] for package in self.packages: if query in package.name: results.append(package) return results def __len__(self): return len(self._packages) PK!t'poetry/semver/__init__.pyimport re from .empty_constraint import EmptyConstraint from .patterns import BASIC_CONSTRAINT from .patterns import CARET_CONSTRAINT from .patterns import TILDE_CONSTRAINT from .patterns import TILDE_PEP440_CONSTRAINT from .patterns import X_CONSTRAINT from .version import Version from .version_constraint import VersionConstraint from .version_range import VersionRange from .version_union import VersionUnion def parse_constraint(constraints): # type: (str) -> VersionConstraint if constraints == "*": return VersionRange() or_constraints = re.split("\s*\|\|?\s*", constraints.strip()) or_groups = [] for constraints in or_constraints: and_constraints = re.split( "(?< ,]) *(? 1: for constraint in and_constraints: constraint_objects.append(parse_single_constraint(constraint)) else: constraint_objects.append(parse_single_constraint(and_constraints[0])) if len(constraint_objects) == 1: constraint = constraint_objects[0] else: constraint = constraint_objects[0] for next_constraint in constraint_objects[1:]: constraint = constraint.intersect(next_constraint) or_groups.append(constraint) if len(or_groups) == 1: return or_groups[0] else: return VersionUnion.of(*or_groups) def parse_single_constraint(constraint): # type: (str) -> VersionConstraint m = re.match("(?i)^v?[xX*](\.[xX*])*$", constraint) if m: return VersionRange() # Tilde range m = TILDE_CONSTRAINT.match(constraint) if m: version = Version.parse(m.group(1)) high = version.stable.next_minor if len(m.group(1).split(".")) == 1: high = version.stable.next_major return VersionRange(version, high, include_min=True) return VersionRange() # PEP 440 Tilde range (~=) m = TILDE_PEP440_CONSTRAINT.match(constraint) if m: precision = 1 if m.group(3): precision += 1 if m.group(4): precision += 1 version = 
Version.parse(m.group(1)) if precision == 2: low = version high = version.stable.next_major else: low = Version(version.major, version.minor, 0) high = version.stable.next_minor return VersionRange(low, high, include_min=True) # Caret range m = CARET_CONSTRAINT.match(constraint) if m: version = Version.parse(m.group(1)) return VersionRange(version, version.next_breaking, include_min=True) # X Range m = X_CONSTRAINT.match(constraint) if m: op = m.group(1) major = int(m.group(2)) minor = m.group(3) if minor is not None: version = Version(major, int(minor), 0) result = VersionRange(version, version.next_minor, include_min=True) else: if major == 0: result = VersionRange(max=Version(1, 0, 0)) else: version = Version(major, 0, 0) result = VersionRange(version, version.next_major, include_min=True) if op == "!=": result = VersionRange().difference(result) return result # Basic comparator m = BASIC_CONSTRAINT.match(constraint) if m: op = m.group(1) version = m.group(2) try: version = Version.parse(version) except ValueError: raise ValueError( "Could not parse version constraint: {}".format(constraint) ) if op == "<": return VersionRange(max=version) elif op == "<=": return VersionRange(max=version, include_max=True) elif op == ">": return VersionRange(min=version) elif op == ">=": return VersionRange(min=version, include_min=True) elif op == "!=": return VersionUnion(VersionRange(max=version), VersionRange(min=version)) else: return version raise ValueError("Could not parse version constraint: {}".format(constraint)) PK!Du-22!poetry/semver/empty_constraint.pyfrom .version_constraint import VersionConstraint class EmptyConstraint(VersionConstraint): def is_empty(self): return True def is_any(self): return False def allows(self, version): return False def allows_all(self, other): return other.is_empty() def allows_any(self, other): return False def intersect(self, other): return self def union(self, other): return other def difference(self, other): return self def 
__str__(self): return "" PK!ɕpoetry/semver/patterns.pyimport re MODIFIERS = ( "[._-]?" "((?!post)(?:beta|b|c|pre|RC|alpha|a|patch|pl|p|dev)(?:(?:[.-]?\d+)*)?)?" "([+-]?([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?" ) _COMPLETE_VERSION = "v?(\d+)(?:\.(\d+))?(?:\.(\d+))?{}(?:\+[^\s]+)?".format(MODIFIERS) COMPLETE_VERSION = re.compile("(?i)" + _COMPLETE_VERSION) CARET_CONSTRAINT = re.compile("(?i)^\^({})$".format(_COMPLETE_VERSION)) TILDE_CONSTRAINT = re.compile("(?i)^~(?!=)({})$".format(_COMPLETE_VERSION)) TILDE_PEP440_CONSTRAINT = re.compile("(?i)^~=({})$".format(_COMPLETE_VERSION)) X_CONSTRAINT = re.compile("^(!= ?|==)?v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.[xX*])+$") BASIC_CONSTRAINT = re.compile( "(?i)^(<>|!=|>=?|<=?|==?)?\s*({})".format(_COMPLETE_VERSION) ) PK! ++poetry/semver/version.pyimport re from typing import List from typing import Union from .empty_constraint import EmptyConstraint from .patterns import COMPLETE_VERSION from .version_constraint import VersionConstraint from .version_range import VersionRange from .version_union import VersionUnion class Version(VersionRange): """ A parsed semantic version number. 
""" def __init__( self, major, # type: int minor=None, # type: Union[int, None] patch=None, # type: Union[int, None] pre=None, # type: Union[str, None] build=None, # type: Union[str, None] text=None, # type: Union[str, None] precision=None, # type: Union[str, None] ): # type: () -> None self._major = int(major) self._precision = None if precision is None: self._precision = 1 if minor is None: minor = 0 else: if self._precision is not None: self._precision += 1 self._minor = int(minor) if patch is None: patch = 0 else: if self._precision is not None: self._precision += 1 if precision is not None: self._precision = precision self._patch = int(patch) if text is None: parts = [str(major)] if self._precision >= 2 or minor != 0: parts.append(str(minor)) if self._precision >= 3 or patch != 0: parts.append(str(patch)) text = ".".join(parts) if pre: text += "-{}".format(pre) if build: text += "+{}".format(build) self._text = text pre = self._normalize_prerelease(pre) self._prerelease = [] if pre is not None: self._prerelease = self._split_parts(pre) build = self._normalize_build(build) self._build = [] if build is not None: if build.startswith(("-", "+")): build = build[1:] self._build = self._split_parts(build) @property def major(self): # type: () -> int return self._major @property def minor(self): # type: () -> int return self._minor @property def patch(self): # type: () -> int return self._patch @property def prerelease(self): # type: () -> List[str] return self._prerelease @property def build(self): # type: () -> List[str] return self._build @property def text(self): return self._text @property def stable(self): if not self.is_prerelease(): return self return self.next_patch @property def next_major(self): # type: () -> Version if self.is_prerelease() and self.minor == 0 and self.patch == 0: return Version(self.major, self.minor, self.patch) return self._increment_major() @property def next_minor(self): # type: () -> Version if self.is_prerelease() and self.patch == 
0: return Version(self.major, self.minor, self.patch) return self._increment_minor() @property def next_patch(self): # type: () -> Version if self.is_prerelease(): return Version(self.major, self.minor, self.patch) return self._increment_patch() @property def next_breaking(self): # type: () -> Version if self.major == 0: if self.minor != 0: return self._increment_minor() if self._precision == 1: return self._increment_major() elif self._precision == 2: return self._increment_minor() return self._increment_patch() return self._increment_major() @property def first_prerelease(self): # type: () -> Version return Version.parse( "{}.{}.{}-alpha.0".format(self.major, self.minor, self.patch) ) @property def min(self): return self @property def max(self): return self @property def include_min(self): return True @property def include_max(self): return True @classmethod def parse(cls, text): # type: (str) -> Version match = COMPLETE_VERSION.match(text) if match is None: raise ValueError('Unable to parse "{}".'.format(text)) text = text.rstrip(".") major = int(match.group(1)) minor = int(match.group(2)) if match.group(2) else None patch = int(match.group(3)) if match.group(3) else None pre = match.group(4) build = match.group(5) if build: build = build.lstrip("+") return Version(major, minor, patch, pre, build, text) def is_any(self): return False def is_empty(self): return False def is_prerelease(self): # type: () -> bool return len(self._prerelease) > 0 def allows(self, version): # type: (Version) -> bool return self == version def allows_all(self, other): # type: (VersionConstraint) -> bool return other.is_empty() or other == self def allows_any(self, other): # type: (VersionConstraint) -> bool return other.allows(self) def intersect(self, other): # type: (VersionConstraint) -> VersionConstraint if other.allows(self): return self return EmptyConstraint() def union(self, other): # type: (VersionConstraint) -> VersionConstraint from .version_range import VersionRange if 
other.allows(self): return other if isinstance(other, VersionRange): if other.min == self: return VersionRange( other.min, other.max, include_min=True, include_max=other.include_max, ) if other.max == self: return VersionRange( other.min, other.max, include_min=other.include_min, include_max=True, ) return VersionUnion.of(self, other) def difference(self, other): # type: (VersionConstraint) -> VersionConstraint if other.allows(self): return EmptyConstraint() return self def _increment_major(self): # type: () -> Version return Version(self.major + 1, 0, 0, precision=self._precision) def _increment_minor(self): # type: () -> Version return Version(self.major, self.minor + 1, 0, precision=self._precision) def _increment_patch(self): # type: () -> Version return Version( self.major, self.minor, self.patch + 1, precision=self._precision ) def _normalize_prerelease(self, pre): # type: (str) -> str if not pre: return m = re.match("(?i)^(a|alpha|b|beta|c|pre|rc|dev)[-.]?(\d+)?$", pre) if not m: return modifier = m.group(1) number = m.group(2) if number is None: number = 0 if modifier == "a": modifier = "alpha" elif modifier == "b": modifier = "beta" elif modifier in {"c", "pre"}: modifier = "rc" elif modifier == "dev": modifier = "alpha" return "{}.{}".format(modifier, number) def _normalize_build(self, build): # type: (str) -> str if not build: return if build == "0": return if build.startswith("post"): build = build.lstrip("post") if not build: return return build def _split_parts(self, text): # type: (str) -> List[Union[str, int]] parts = text.split(".") for i, part in enumerate(parts): try: parts[i] = int(part) except (TypeError, ValueError): continue return parts def __lt__(self, other): return self._cmp(other) < 0 def __le__(self, other): return self._cmp(other) <= 0 def __gt__(self, other): return self._cmp(other) > 0 def __ge__(self, other): return self._cmp(other) >= 0 def _cmp(self, other): if not isinstance(other, VersionConstraint): return NotImplemented if not 
isinstance(other, Version): return -other._cmp(self) if self.major != other.major: return self._cmp_parts(self.major, other.major) if self.minor != other.minor: return self._cmp_parts(self.minor, other.minor) if self.patch != other.patch: return self._cmp_parts(self.patch, other.patch) # Pre-releases always come before no pre-release string. if not self.is_prerelease() and other.is_prerelease(): return 1 if not other.is_prerelease() and self.is_prerelease(): return -1 comparison = self._cmp_lists(self.prerelease, other.prerelease) if comparison != 0: return comparison # Builds always come after no build string. if not self.build and other.build: return -1 if not other.build and self.build: return 1 return self._cmp_lists(self.build, other.build) def _cmp_parts(self, a, b): if a < b: return -1 elif a > b: return 1 return 0 def _cmp_lists(self, a, b): # type: (List, List) -> int for i in range(max(len(a), len(b))): a_part = None if i < len(a): a_part = a[i] b_part = None if i < len(b): b_part = b[i] if a_part == b_part: continue # Missing parts come before present ones. 
if a_part is None: return -1 if b_part is None: return 1 if isinstance(a_part, int): if isinstance(b_part, int): return self._cmp_parts(a_part, b_part) return -1 else: if isinstance(b_part, int): return 1 return self._cmp_parts(a_part, b_part) return 0 def __eq__(self, other): # type: (Version) -> bool if not isinstance(other, Version): return NotImplemented return ( self._major == other.major and self._minor == other.minor and self._patch == other.patch and self._prerelease == other.prerelease and self._build == other.build ) def __ne__(self, other): return not self == other def __str__(self): return self._text def __repr__(self): return "".format(str(self)) def __hash__(self): return hash( ( self.major, self.minor, self.patch, ".".join(str(p) for p in self.prerelease), ".".join(str(p) for p in self.build), ) ) PK!#OO#poetry/semver/version_constraint.pyclass VersionConstraint: def is_empty(self): # type: () -> bool raise NotImplementedError() def is_any(self): # type: () -> bool raise NotImplementedError() def allows(self, version): # type: (Version) -> bool raise NotImplementedError() def allows_all(self, other): # type: (VersionConstraint) -> bool raise NotImplementedError() def allows_any(self, other): # type: (VersionConstraint) -> bool raise NotImplementedError() def intersect(self, other): # type: (VersionConstraint) -> VersionConstraint raise NotImplementedError() def union(self, other): # type: (VersionConstraint) -> VersionConstraint raise NotImplementedError() def difference(self, other): # type: (VersionConstraint) -> VersionConstraint raise NotImplementedError() PK!^22poetry/semver/version_range.pyfrom .empty_constraint import EmptyConstraint from .version_constraint import VersionConstraint from .version_union import VersionUnion class VersionRange(VersionConstraint): def __init__( self, min=None, max=None, include_min=False, include_max=False, always_include_max_prerelease=False, ): self._min = min self._max = max self._include_min = include_min 
self._include_max = include_max @property def min(self): return self._min @property def max(self): return self._max @property def include_min(self): return self._include_min @property def include_max(self): return self._include_max def is_empty(self): return False def is_any(self): return self._min is None and self._max is None def allows(self, other): # type: (Version) -> bool if self._min is not None: if other < self._min: return False if not self._include_min and other == self._min: return False if self._max is not None: if other > self._max: return False if not self._include_max and other == self._max: return False return True def allows_all(self, other): # type: (VersionConstraint) -> bool from .version import Version if other.is_empty(): return True if isinstance(other, Version): return self.allows(other) if isinstance(other, VersionUnion): return all([self.allows_all(constraint) for constraint in other.ranges]) if isinstance(other, VersionRange): return not other.allows_lower(self) and not other.allows_higher(self) raise ValueError("Unknown VersionConstraint type {}.".format(other)) def allows_any(self, other): # type: (VersionConstraint) -> bool from .version import Version if other.is_empty(): return False if isinstance(other, Version): return self.allows(other) if isinstance(other, VersionUnion): return any([self.allows_any(constraint) for constraint in other.ranges]) if isinstance(other, VersionRange): return not other.is_strictly_lower(self) and not other.is_strictly_higher( self ) raise ValueError("Unknown VersionConstraint type {}.".format(other)) def intersect(self, other): # type: (VersionConstraint) -> VersionConstraint from .version import Version if other.is_empty(): return other if isinstance(other, VersionUnion): return other.intersect(self) # A range and a Version just yields the version if it's in the range. 
if isinstance(other, Version): if self.allows(other): return other return EmptyConstraint() if not isinstance(other, VersionRange): raise ValueError("Unknown VersionConstraint type {}.".format(other)) if self.allows_lower(other): if self.is_strictly_lower(other): return EmptyConstraint() intersect_min = other.min intersect_include_min = other.include_min else: if other.is_strictly_lower(self): return EmptyConstraint() intersect_min = self._min intersect_include_min = self._include_min if self.allows_higher(other): intersect_max = other.max intersect_include_max = other.include_max else: intersect_max = self._max intersect_include_max = self._include_max if intersect_min is None and intersect_max is None: return VersionRange() # If the range is just a single version. if intersect_min == intersect_max: # Because we already verified that the lower range isn't strictly # lower, there must be some overlap. assert intersect_include_min and intersect_include_max return intersect_min # If we got here, there is an actual range. return VersionRange( intersect_min, intersect_max, intersect_include_min, intersect_include_max ) def union(self, other): # type: (VersionConstraint) -> VersionConstraint from .version import Version if isinstance(other, Version): if self.allows(other): return self if other == self.min: return VersionRange( self.min, self.max, include_min=True, include_max=self.include_max ) if other == self.max: return VersionRange( self.min, self.max, include_min=self.include_min, include_max=True ) return VersionUnion.of(self, other) if isinstance(other, VersionRange): # If the two ranges don't overlap, we won't be able to create a single # VersionRange for both of them. 
edges_touch = ( self.max == other.min and (self.include_max or other.include_min) ) or (self.min == other.max and (self.include_min or other.include_max)) if not edges_touch and not self.allows_any(other): return VersionUnion.of(self, other) if self.allows_lower(other): union_min = self.min union_include_min = self.include_min else: union_min = other.min union_include_min = other.include_min if self.allows_higher(other): union_max = self.max union_include_max = self.include_max else: union_max = other.max union_include_max = other.include_max return VersionRange( union_min, union_max, include_min=union_include_min, include_max=union_include_max, ) return VersionUnion.of(self, other) def difference(self, other): # type: (VersionConstraint) -> VersionConstraint from .version import Version if other.is_empty(): return self if isinstance(other, Version): if not self.allows(other): return self if other == self.min: if not self.include_min: return self return VersionRange(self.min, self.max, False, self.include_max) if other == self.max: if not self.include_max: return self return VersionRange(self.min, self.max, self.include_min, False) return VersionUnion.of( VersionRange(self.min, other, self.include_min, False), VersionRange(other, self.max, False, self.include_max), ) elif isinstance(other, VersionRange): if not self.allows_any(other): return self if not self.allows_lower(other): before = None elif self.min == other.min: before = self.min else: before = VersionRange( self.min, other.min, self.include_min, not other.include_min ) if not self.allows_higher(other): after = None elif self.max == other.max: after = self.max else: after = VersionRange( other.max, self.max, not other.include_max, self.include_max ) if before is None and after is None: return EmptyConstraint() if before is None: return after if after is None: return before return VersionUnion.of(before, after) elif isinstance(other, VersionUnion): ranges = [] # type: List[VersionRange] current = self for 
range in other.ranges: # Skip any ranges that are strictly lower than [current]. if range.is_strictly_lower(current): continue # If we reach a range strictly higher than [current], no more ranges # will be relevant so we can bail early. if range.is_strictly_higher(current): break difference = current.difference(range) if difference.is_empty(): return EmptyConstraint() elif isinstance(difference, VersionUnion): # If [range] split [current] in half, we only need to continue # checking future ranges against the latter half. ranges.append(difference.ranges[0]) current = difference.ranges[-1] else: current = difference if not ranges: return current return VersionUnion.of(*(ranges + [current])) raise ValueError("Unknown VersionConstraint type {}.".format(other)) def allows_lower(self, other): # type: (VersionRange) -> bool if self.min is None: return other.min is not None if other.min is None: return False if self.min < other.min: return True if self.min > other.min: return False return self.include_min and not other.include_min def allows_higher(self, other): # type: (VersionRange) -> bool if self.max is None: return other.max is not None if other.max is None: return False if self.max < other.max: return False if self.max > other.max: return True return self.include_max and not other.include_max def is_strictly_lower(self, other): # type: (VersionRange) -> bool if self.max is None or other.min is None: return False if self.max < other.min: return True if self.max > other.min: return False return not self.include_max or not other.include_min def is_strictly_higher(self, other): # type: (VersionRange) -> bool return other.is_strictly_lower(self) def is_adjacent_to(self, other): # type: (VersionRange) -> bool if self.max != other.min: return False return ( self.include_max and not other.include_min or not self.include_max and other.include_min ) def __eq__(self, other): if not isinstance(other, VersionRange): return False return ( self._min == other.min and self._max == 
other.max and self._include_min == other.include_min and self._include_max == other.include_max ) def __lt__(self, other): return self._cmp(other) < 0 def __le__(self, other): return self._cmp(other) <= 0 def __gt__(self, other): return self._cmp(other) > 0 def __ge__(self, other): return self._cmp(other) >= 0 def _cmp(self, other): # type: (VersionRange) -> int if self.min is None: if other.min is None: return self._compare_max(other) return -1 elif other.min is None: return 1 result = self.min._cmp(other.min) if result != 0: return result if self.include_min != other.include_min: return -1 if self.include_min else 1 return self._compare_max(other) def _compare_max(self, other): # type: (VersionRange) -> int if self.max is None: if other.max is None: return 0 return 1 elif other.max is None: return -1 result = self.max._cmp(other.max) if result != 0: return result if self.include_max != other.include_max: return 1 if self.include_max else -1 return 0 def __str__(self): text = "" if self.min is not None: text += ">=" if self.include_min else ">" text += self.min.text if self.max is not None: if self.min is not None: text += "," text += "{}{}".format("<=" if self.include_max else "<", self.max.text) if self.min is None and self.max is None: return "*" return text def __repr__(self): return "".format(str(self)) def __hash__(self): return hash((self.min, self.max, self.include_min, self.include_max)) PK!FFpoetry/semver/version_union.pyfrom .empty_constraint import EmptyConstraint from .version_constraint import VersionConstraint class VersionUnion(VersionConstraint): """ A version constraint representing a union of multiple disjoint version ranges. An instance of this will only be created if the version can't be represented as a non-compound value. 
""" def __init__(self, *ranges): self._ranges = list(ranges) @property def ranges(self): return self._ranges @classmethod def of(cls, *ranges): from .version_range import VersionRange flattened = [] for constraint in ranges: if constraint.is_empty(): continue if isinstance(constraint, VersionUnion): flattened += constraint.ranges continue flattened.append(constraint) if not flattened: return EmptyConstraint() if any([constraint.is_any() for constraint in flattened]): return VersionRange() # Only allow Versions and VersionRanges here so we can more easily reason # about everything in flattened. _EmptyVersions and VersionUnions are # filtered out above. for constraint in flattened: if isinstance(constraint, VersionRange): continue raise ValueError("Unknown VersionConstraint type {}.".format(constraint)) flattened.sort() merged = [] for constraint in flattened: # Merge this constraint with the previous one, but only if they touch. if not merged or ( not merged[-1].allows_any(constraint) and not merged[-1].is_adjacent_to(constraint) ): merged.append(constraint) else: merged[-1] = merged[-1].union(constraint) if len(merged) == 1: return merged[0] return VersionUnion(*merged) def is_empty(self): return False def is_any(self): return False def allows(self, version): # type: (Version) -> bool return any([constraint.allows(version) for constraint in self._ranges]) def allows_all(self, other): # type: (VersionConstraint) -> bool our_ranges = iter(self._ranges) their_ranges = iter(self._ranges_for(other)) our_current_range = next(our_ranges, None) their_current_range = next(their_ranges, None) while our_current_range and their_current_range: if our_current_range.allows_all(their_current_range): their_current_range = next(their_ranges, None) else: our_current_range = next(our_ranges, None) return their_current_range is None def allows_any(self, other): # type: (VersionConstraint) -> bool our_ranges = iter(self._ranges) their_ranges = iter(self._ranges_for(other)) 
our_current_range = next(our_ranges, None) their_current_range = next(their_ranges, None) while our_current_range and their_current_range: if our_current_range.allows_any(their_current_range): return True if their_current_range.allows_higher(our_current_range): our_current_range = next(our_ranges, None) else: their_current_range = next(their_ranges, None) return False def intersect(self, other): # type: (VersionConstraint) -> VersionConstraint our_ranges = iter(self._ranges) their_ranges = iter(self._ranges_for(other)) new_ranges = [] our_current_range = next(our_ranges, None) their_current_range = next(their_ranges, None) while our_current_range and their_current_range: intersection = our_current_range.intersect(their_current_range) if not intersection.is_empty(): new_ranges.append(intersection) if their_current_range.allows_higher(our_current_range): our_current_range = next(our_ranges, None) else: their_current_range = next(their_ranges, None) return VersionUnion.of(*new_ranges) def union(self, other): # type: (VersionConstraint) -> VersionConstraint return VersionUnion.of(self, other) def difference(self, other): # type: (VersionConstraint) -> VersionConstraint our_ranges = iter(self._ranges) their_ranges = iter(self._ranges_for(other)) new_ranges = [] state = { "current": next(our_ranges, None), "their_range": next(their_ranges, None), } def their_next_range(): state["their_range"] = next(their_ranges, None) if state["their_range"]: return True new_ranges.append(state["current"]) our_current = next(our_ranges, None) while our_current: new_ranges.append(our_current) our_current = next(our_ranges, None) return False def our_next_range(include_current=True): if include_current: new_ranges.append(state["current"]) our_current = next(our_ranges, None) if not our_current: return False state["current"] = our_current return True while True: if state["their_range"].is_strictly_lower(state["current"]): if not their_next_range(): break continue if 
state["their_range"].is_strictly_higher(state["current"]): if not our_next_range(): break continue difference = state["current"].difference(state["their_range"]) if isinstance(difference, VersionUnion): assert len(difference.ranges) == 2 new_ranges.append(difference.ranges[0]) state["current"] = difference.ranges[-1] if not their_next_range(): break elif difference.is_empty(): if not our_next_range(False): break else: state["current"] = difference if state["current"].allows_higher(state["their_range"]): if not their_next_range(): break else: if not our_next_range(): break if not new_ranges: return EmptyConstraint() if len(new_ranges) == 1: return new_ranges[0] return VersionUnion.of(*new_ranges) def _ranges_for( self, constraint ): # type: (VersionConstraint) -> List[VersionRange] from .version_range import VersionRange if constraint.is_empty(): return [] if isinstance(constraint, VersionUnion): return constraint.ranges if isinstance(constraint, VersionRange): return [constraint] raise ValueError("Unknown VersionConstraint type {}".format(constraint)) def __eq__(self, other): if not isinstance(other, VersionUnion): return False return self._ranges == other.ranges def __str__(self): return " || ".join([str(r) for r in self._ranges]) def __repr__(self): return "".format(str(self)) PK!A poetry/spdx/__init__.pyimport json import os from .license import License from .updater import Updater _licenses = None def license_by_id(identifier): if _licenses is None: load_licenses() id = identifier.lower() if id not in _licenses: raise ValueError("Invalid license id: {}".format(identifier)) return _licenses[id] def load_licenses(): global _licenses _licenses = {} licenses_file = os.path.join(os.path.dirname(__file__), "data", "licenses.json") with open(licenses_file) as f: data = json.loads(f.read()) for name, license in data.items(): _licenses[name.lower()] = License(name, license[0], license[1], license[2]) if __name__ == "__main__": updater = Updater() updater.dump() 
PK!=|uupoetry/spdx/data/licenses.json{ "0BSD": [ "BSD Zero Clause License", false, false ], "AAL": [ "Attribution Assurance License", true, false ], "ADSL": [ "Amazon Digital Services License", false, false ], "AFL-1.1": [ "Academic Free License v1.1", true, false ], "AFL-1.2": [ "Academic Free License v1.2", true, false ], "AFL-2.0": [ "Academic Free License v2.0", true, false ], "AFL-2.1": [ "Academic Free License v2.1", true, false ], "AFL-3.0": [ "Academic Free License v3.0", true, false ], "AGPL-1.0": [ "Affero General Public License v1.0", false, false ], "AGPL-3.0": [ "GNU Affero General Public License v3.0", true, true ], "AGPL-3.0-only": [ "GNU Affero General Public License v3.0 only", true, false ], "AGPL-3.0-or-later": [ "GNU Affero General Public License v3.0 or later", true, false ], "AMDPLPA": [ "AMD's plpa_map.c License", false, false ], "AML": [ "Apple MIT License", false, false ], "AMPAS": [ "Academy of Motion Picture Arts and Sciences BSD", false, false ], "ANTLR-PD": [ "ANTLR Software Rights Notice", false, false ], "APAFML": [ "Adobe Postscript AFM License", false, false ], "APL-1.0": [ "Adaptive Public License 1.0", true, false ], "APSL-1.0": [ "Apple Public Source License 1.0", true, false ], "APSL-1.1": [ "Apple Public Source License 1.1", true, false ], "APSL-1.2": [ "Apple Public Source License 1.2", true, false ], "APSL-2.0": [ "Apple Public Source License 2.0", true, false ], "Abstyles": [ "Abstyles License", false, false ], "Adobe-2006": [ "Adobe Systems Incorporated Source Code License Agreement", false, false ], "Adobe-Glyph": [ "Adobe Glyph List License", false, false ], "Afmparse": [ "Afmparse License", false, false ], "Aladdin": [ "Aladdin Free Public License", false, false ], "Apache-1.0": [ "Apache License 1.0", false, false ], "Apache-1.1": [ "Apache License 1.1", true, false ], "Apache-2.0": [ "Apache License 2.0", true, false ], "Artistic-1.0": [ "Artistic License 1.0", true, false ], "Artistic-1.0-Perl": [ "Artistic License 
1.0 (Perl)", true, false ], "Artistic-1.0-cl8": [ "Artistic License 1.0 w/clause 8", true, false ], "Artistic-2.0": [ "Artistic License 2.0", true, false ], "BSD-1-Clause": [ "BSD 1-Clause License", false, false ], "BSD-2-Clause": [ "BSD 2-Clause \"Simplified\" License", true, false ], "BSD-2-Clause-FreeBSD": [ "BSD 2-Clause FreeBSD License", false, false ], "BSD-2-Clause-NetBSD": [ "BSD 2-Clause NetBSD License", false, false ], "BSD-2-Clause-Patent": [ "BSD-2-Clause Plus Patent License", true, false ], "BSD-3-Clause": [ "BSD 3-Clause \"New\" or \"Revised\" License", true, false ], "BSD-3-Clause-Attribution": [ "BSD with attribution", false, false ], "BSD-3-Clause-Clear": [ "BSD 3-Clause Clear License", false, false ], "BSD-3-Clause-LBNL": [ "Lawrence Berkeley National Labs BSD variant license", false, false ], "BSD-3-Clause-No-Nuclear-License": [ "BSD 3-Clause No Nuclear License", false, false ], "BSD-3-Clause-No-Nuclear-License-2014": [ "BSD 3-Clause No Nuclear License 2014", false, false ], "BSD-3-Clause-No-Nuclear-Warranty": [ "BSD 3-Clause No Nuclear Warranty", false, false ], "BSD-4-Clause": [ "BSD 4-Clause \"Original\" or \"Old\" License", false, false ], "BSD-4-Clause-UC": [ "BSD-4-Clause (University of California-Specific)", false, false ], "BSD-Protection": [ "BSD Protection License", false, false ], "BSD-Source-Code": [ "BSD Source Code Attribution", false, false ], "BSL-1.0": [ "Boost Software License 1.0", true, false ], "Bahyph": [ "Bahyph License", false, false ], "Barr": [ "Barr License", false, false ], "Beerware": [ "Beerware License", false, false ], "BitTorrent-1.0": [ "BitTorrent Open Source License v1.0", false, false ], "BitTorrent-1.1": [ "BitTorrent Open Source License v1.1", false, false ], "Borceux": [ "Borceux license", false, false ], "CATOSL-1.1": [ "Computer Associates Trusted Open Source License 1.1", true, false ], "CC-BY-1.0": [ "Creative Commons Attribution 1.0", false, false ], "CC-BY-2.0": [ "Creative Commons Attribution 2.0", 
false, false ], "CC-BY-2.5": [ "Creative Commons Attribution 2.5", false, false ], "CC-BY-3.0": [ "Creative Commons Attribution 3.0", false, false ], "CC-BY-4.0": [ "Creative Commons Attribution 4.0", false, false ], "CC-BY-NC-1.0": [ "Creative Commons Attribution Non Commercial 1.0", false, false ], "CC-BY-NC-2.0": [ "Creative Commons Attribution Non Commercial 2.0", false, false ], "CC-BY-NC-2.5": [ "Creative Commons Attribution Non Commercial 2.5", false, false ], "CC-BY-NC-3.0": [ "Creative Commons Attribution Non Commercial 3.0", false, false ], "CC-BY-NC-4.0": [ "Creative Commons Attribution Non Commercial 4.0", false, false ], "CC-BY-NC-ND-1.0": [ "Creative Commons Attribution Non Commercial No Derivatives 1.0", false, false ], "CC-BY-NC-ND-2.0": [ "Creative Commons Attribution Non Commercial No Derivatives 2.0", false, false ], "CC-BY-NC-ND-2.5": [ "Creative Commons Attribution Non Commercial No Derivatives 2.5", false, false ], "CC-BY-NC-ND-3.0": [ "Creative Commons Attribution Non Commercial No Derivatives 3.0", false, false ], "CC-BY-NC-ND-4.0": [ "Creative Commons Attribution Non Commercial No Derivatives 4.0", false, false ], "CC-BY-NC-SA-1.0": [ "Creative Commons Attribution Non Commercial Share Alike 1.0", false, false ], "CC-BY-NC-SA-2.0": [ "Creative Commons Attribution Non Commercial Share Alike 2.0", false, false ], "CC-BY-NC-SA-2.5": [ "Creative Commons Attribution Non Commercial Share Alike 2.5", false, false ], "CC-BY-NC-SA-3.0": [ "Creative Commons Attribution Non Commercial Share Alike 3.0", false, false ], "CC-BY-NC-SA-4.0": [ "Creative Commons Attribution Non Commercial Share Alike 4.0", false, false ], "CC-BY-ND-1.0": [ "Creative Commons Attribution No Derivatives 1.0", false, false ], "CC-BY-ND-2.0": [ "Creative Commons Attribution No Derivatives 2.0", false, false ], "CC-BY-ND-2.5": [ "Creative Commons Attribution No Derivatives 2.5", false, false ], "CC-BY-ND-3.0": [ "Creative Commons Attribution No Derivatives 3.0", false, false ], 
"CC-BY-ND-4.0": [ "Creative Commons Attribution No Derivatives 4.0", false, false ], "CC-BY-SA-1.0": [ "Creative Commons Attribution Share Alike 1.0", false, false ], "CC-BY-SA-2.0": [ "Creative Commons Attribution Share Alike 2.0", false, false ], "CC-BY-SA-2.5": [ "Creative Commons Attribution Share Alike 2.5", false, false ], "CC-BY-SA-3.0": [ "Creative Commons Attribution Share Alike 3.0", false, false ], "CC-BY-SA-4.0": [ "Creative Commons Attribution Share Alike 4.0", false, false ], "CC0-1.0": [ "Creative Commons Zero v1.0 Universal", false, false ], "CDDL-1.0": [ "Common Development and Distribution License 1.0", true, false ], "CDDL-1.1": [ "Common Development and Distribution License 1.1", false, false ], "CDLA-Permissive-1.0": [ "Community Data License Agreement Permissive 1.0", false, false ], "CDLA-Sharing-1.0": [ "Community Data License Agreement Sharing 1.0", false, false ], "CECILL-1.0": [ "CeCILL Free Software License Agreement v1.0", false, false ], "CECILL-1.1": [ "CeCILL Free Software License Agreement v1.1", false, false ], "CECILL-2.0": [ "CeCILL Free Software License Agreement v2.0", false, false ], "CECILL-2.1": [ "CeCILL Free Software License Agreement v2.1", true, false ], "CECILL-B": [ "CeCILL-B Free Software License Agreement", false, false ], "CECILL-C": [ "CeCILL-C Free Software License Agreement", false, false ], "CNRI-Jython": [ "CNRI Jython License", false, false ], "CNRI-Python": [ "CNRI Python License", true, false ], "CNRI-Python-GPL-Compatible": [ "CNRI Python Open Source GPL Compatible License Agreement", false, false ], "CPAL-1.0": [ "Common Public Attribution License 1.0", true, false ], "CPL-1.0": [ "Common Public License 1.0", true, false ], "CPOL-1.02": [ "Code Project Open License 1.02", false, false ], "CUA-OPL-1.0": [ "CUA Office Public License v1.0", true, false ], "Caldera": [ "Caldera License", false, false ], "ClArtistic": [ "Clarified Artistic License", false, false ], "Condor-1.1": [ "Condor Public License v1.1", 
false, false ], "Crossword": [ "Crossword License", false, false ], "CrystalStacker": [ "CrystalStacker License", false, false ], "Cube": [ "Cube License", false, false ], "D-FSL-1.0": [ "Deutsche Freie Software Lizenz", false, false ], "DOC": [ "DOC License", false, false ], "DSDP": [ "DSDP License", false, false ], "Dotseqn": [ "Dotseqn License", false, false ], "ECL-1.0": [ "Educational Community License v1.0", true, false ], "ECL-2.0": [ "Educational Community License v2.0", true, false ], "EFL-1.0": [ "Eiffel Forum License v1.0", true, false ], "EFL-2.0": [ "Eiffel Forum License v2.0", true, false ], "EPL-1.0": [ "Eclipse Public License 1.0", true, false ], "EPL-2.0": [ "Eclipse Public License 2.0", true, false ], "EUDatagrid": [ "EU DataGrid Software License", true, false ], "EUPL-1.0": [ "European Union Public License 1.0", false, false ], "EUPL-1.1": [ "European Union Public License 1.1", true, false ], "EUPL-1.2": [ "European Union Public License 1.2", true, false ], "Entessa": [ "Entessa Public License v1.0", true, false ], "ErlPL-1.1": [ "Erlang Public License v1.1", false, false ], "Eurosym": [ "Eurosym License", false, false ], "FSFAP": [ "FSF All Permissive License", false, false ], "FSFUL": [ "FSF Unlimited License", false, false ], "FSFULLR": [ "FSF Unlimited License (with License Retention)", false, false ], "FTL": [ "Freetype Project License", false, false ], "Fair": [ "Fair License", true, false ], "Frameworx-1.0": [ "Frameworx Open License 1.0", true, false ], "FreeImage": [ "FreeImage Public License v1.0", false, false ], "GFDL-1.1": [ "GNU Free Documentation License v1.1", false, true ], "GFDL-1.1-only": [ "GNU Free Documentation License v1.1 only", false, false ], "GFDL-1.1-or-later": [ "GNU Free Documentation License v1.1 or later", false, false ], "GFDL-1.2": [ "GNU Free Documentation License v1.2", false, true ], "GFDL-1.2-only": [ "GNU Free Documentation License v1.2 only", false, false ], "GFDL-1.2-or-later": [ "GNU Free Documentation 
License v1.2 or later", false, false ], "GFDL-1.3": [ "GNU Free Documentation License v1.3", false, true ], "GFDL-1.3-only": [ "GNU Free Documentation License v1.3 only", false, false ], "GFDL-1.3-or-later": [ "GNU Free Documentation License v1.3 or later", false, false ], "GL2PS": [ "GL2PS License", false, false ], "GPL-1.0": [ "GNU General Public License v1.0 only", false, true ], "GPL-1.0+": [ "GNU General Public License v1.0 or later", false, true ], "GPL-1.0-only": [ "GNU General Public License v1.0 only", false, false ], "GPL-1.0-or-later": [ "GNU General Public License v1.0 or later", false, false ], "GPL-2.0": [ "GNU General Public License v2.0 only", true, true ], "GPL-2.0+": [ "GNU General Public License v2.0 or later", true, true ], "GPL-2.0-only": [ "GNU General Public License v2.0 only", true, false ], "GPL-2.0-or-later": [ "GNU General Public License v2.0 or later", true, false ], "GPL-2.0-with-GCC-exception": [ "GNU General Public License v2.0 w/GCC Runtime Library exception", false, true ], "GPL-2.0-with-autoconf-exception": [ "GNU General Public License v2.0 w/Autoconf exception", false, true ], "GPL-2.0-with-bison-exception": [ "GNU General Public License v2.0 w/Bison exception", false, true ], "GPL-2.0-with-classpath-exception": [ "GNU General Public License v2.0 w/Classpath exception", false, true ], "GPL-2.0-with-font-exception": [ "GNU General Public License v2.0 w/Font exception", false, true ], "GPL-3.0": [ "GNU General Public License v3.0 only", true, true ], "GPL-3.0+": [ "GNU General Public License v3.0 or later", true, true ], "GPL-3.0-only": [ "GNU General Public License v3.0 only", true, false ], "GPL-3.0-or-later": [ "GNU General Public License v3.0 or later", true, false ], "GPL-3.0-with-GCC-exception": [ "GNU General Public License v3.0 w/GCC Runtime Library exception", true, true ], "GPL-3.0-with-autoconf-exception": [ "GNU General Public License v3.0 w/Autoconf exception", false, true ], "Giftware": [ "Giftware License", false, 
false ], "Glide": [ "3dfx Glide License", false, false ], "Glulxe": [ "Glulxe License", false, false ], "HPND": [ "Historical Permission Notice and Disclaimer", true, false ], "HaskellReport": [ "Haskell Language Report License", false, false ], "IBM-pibs": [ "IBM PowerPC Initialization and Boot Software", false, false ], "ICU": [ "ICU License", false, false ], "IJG": [ "Independent JPEG Group License", false, false ], "IPA": [ "IPA Font License", true, false ], "IPL-1.0": [ "IBM Public License v1.0", true, false ], "ISC": [ "ISC License", true, false ], "ImageMagick": [ "ImageMagick License", false, false ], "Imlib2": [ "Imlib2 License", false, false ], "Info-ZIP": [ "Info-ZIP License", false, false ], "Intel": [ "Intel Open Source License", true, false ], "Intel-ACPI": [ "Intel ACPI Software License Agreement", false, false ], "Interbase-1.0": [ "Interbase Public License v1.0", false, false ], "JSON": [ "JSON License", false, false ], "JasPer-2.0": [ "JasPer License", false, false ], "LAL-1.2": [ "Licence Art Libre 1.2", false, false ], "LAL-1.3": [ "Licence Art Libre 1.3", false, false ], "LGPL-2.0": [ "GNU Library General Public License v2 only", true, true ], "LGPL-2.0+": [ "GNU Library General Public License v2 or later", true, true ], "LGPL-2.0-only": [ "GNU Library General Public License v2 only", true, false ], "LGPL-2.0-or-later": [ "GNU Library General Public License v2 or later", true, false ], "LGPL-2.1": [ "GNU Lesser General Public License v2.1 only", true, true ], "LGPL-2.1+": [ "GNU Library General Public License v2 or later", true, true ], "LGPL-2.1-only": [ "GNU Lesser General Public License v2.1 only", true, false ], "LGPL-2.1-or-later": [ "GNU Lesser General Public License v2.1 or later", true, false ], "LGPL-3.0": [ "GNU Lesser General Public License v3.0 only", true, true ], "LGPL-3.0+": [ "GNU Lesser General Public License v3.0 or later", true, true ], "LGPL-3.0-only": [ "GNU Lesser General Public License v3.0 only", true, false ], 
"LGPL-3.0-or-later": [ "GNU Lesser General Public License v3.0 or later", true, false ], "LGPLLR": [ "Lesser General Public License For Linguistic Resources", false, false ], "LPL-1.0": [ "Lucent Public License Version 1.0", true, false ], "LPL-1.02": [ "Lucent Public License v1.02", true, false ], "LPPL-1.0": [ "LaTeX Project Public License v1.0", false, false ], "LPPL-1.1": [ "LaTeX Project Public License v1.1", false, false ], "LPPL-1.2": [ "LaTeX Project Public License v1.2", false, false ], "LPPL-1.3a": [ "LaTeX Project Public License v1.3a", false, false ], "LPPL-1.3c": [ "LaTeX Project Public License v1.3c", true, false ], "Latex2e": [ "Latex2e License", false, false ], "Leptonica": [ "Leptonica License", false, false ], "LiLiQ-P-1.1": [ "Licence Libre du Qu\u00e9bec \u2013 Permissive version 1.1", true, false ], "LiLiQ-R-1.1": [ "Licence Libre du Qu\u00e9bec \u2013 R\u00e9ciprocit\u00e9 version 1.1", true, false ], "LiLiQ-Rplus-1.1": [ "Licence Libre du Qu\u00e9bec \u2013 R\u00e9ciprocit\u00e9 forte version 1.1", true, false ], "Libpng": [ "libpng License", false, false ], "MIT": [ "MIT License", true, false ], "MIT-CMU": [ "CMU License", false, false ], "MIT-advertising": [ "Enlightenment License (e16)", false, false ], "MIT-enna": [ "enna License", false, false ], "MIT-feh": [ "feh License", false, false ], "MITNFA": [ "MIT +no-false-attribs license", false, false ], "MPL-1.0": [ "Mozilla Public License 1.0", true, false ], "MPL-1.1": [ "Mozilla Public License 1.1", true, false ], "MPL-2.0": [ "Mozilla Public License 2.0", true, false ], "MPL-2.0-no-copyleft-exception": [ "Mozilla Public License 2.0 (no copyleft exception)", true, false ], "MS-PL": [ "Microsoft Public License", true, false ], "MS-RL": [ "Microsoft Reciprocal License", true, false ], "MTLL": [ "Matrix Template Library License", false, false ], "MakeIndex": [ "MakeIndex License", false, false ], "MirOS": [ "MirOS License", true, false ], "Motosoto": [ "Motosoto License", true, false ], 
"Multics": [ "Multics License", true, false ], "Mup": [ "Mup License", false, false ], "NASA-1.3": [ "NASA Open Source Agreement 1.3", true, false ], "NBPL-1.0": [ "Net Boolean Public License v1", false, false ], "NCSA": [ "University of Illinois/NCSA Open Source License", true, false ], "NGPL": [ "Nethack General Public License", true, false ], "NLOD-1.0": [ "Norwegian Licence for Open Government Data", false, false ], "NLPL": [ "No Limit Public License", false, false ], "NOSL": [ "Netizen Open Source License", false, false ], "NPL-1.0": [ "Netscape Public License v1.0", false, false ], "NPL-1.1": [ "Netscape Public License v1.1", false, false ], "NPOSL-3.0": [ "Non-Profit Open Software License 3.0", true, false ], "NRL": [ "NRL License", false, false ], "NTP": [ "NTP License", true, false ], "Naumen": [ "Naumen Public License", true, false ], "Net-SNMP": [ "Net-SNMP License", false, false ], "NetCDF": [ "NetCDF license", false, false ], "Newsletr": [ "Newsletr License", false, false ], "Nokia": [ "Nokia Open Source License", true, false ], "Noweb": [ "Noweb License", false, false ], "Nunit": [ "Nunit License", false, true ], "OCCT-PL": [ "Open CASCADE Technology Public License", false, false ], "OCLC-2.0": [ "OCLC Research Public License 2.0", true, false ], "ODbL-1.0": [ "ODC Open Database License v1.0", false, false ], "OFL-1.0": [ "SIL Open Font License 1.0", false, false ], "OFL-1.1": [ "SIL Open Font License 1.1", true, false ], "OGTSL": [ "Open Group Test Suite License", true, false ], "OLDAP-1.1": [ "Open LDAP Public License v1.1", false, false ], "OLDAP-1.2": [ "Open LDAP Public License v1.2", false, false ], "OLDAP-1.3": [ "Open LDAP Public License v1.3", false, false ], "OLDAP-1.4": [ "Open LDAP Public License v1.4", false, false ], "OLDAP-2.0": [ "Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)", false, false ], "OLDAP-2.0.1": [ "Open LDAP Public License v2.0.1", false, false ], "OLDAP-2.1": [ "Open LDAP Public License v2.1", false, false ], 
"OLDAP-2.2": [ "Open LDAP Public License v2.2", false, false ], "OLDAP-2.2.1": [ "Open LDAP Public License v2.2.1", false, false ], "OLDAP-2.2.2": [ "Open LDAP Public License 2.2.2", false, false ], "OLDAP-2.3": [ "Open LDAP Public License v2.3", false, false ], "OLDAP-2.4": [ "Open LDAP Public License v2.4", false, false ], "OLDAP-2.5": [ "Open LDAP Public License v2.5", false, false ], "OLDAP-2.6": [ "Open LDAP Public License v2.6", false, false ], "OLDAP-2.7": [ "Open LDAP Public License v2.7", false, false ], "OLDAP-2.8": [ "Open LDAP Public License v2.8", false, false ], "OML": [ "Open Market License", false, false ], "OPL-1.0": [ "Open Public License v1.0", false, false ], "OSET-PL-2.1": [ "OSET Public License version 2.1", true, false ], "OSL-1.0": [ "Open Software License 1.0", true, false ], "OSL-1.1": [ "Open Software License 1.1", false, false ], "OSL-2.0": [ "Open Software License 2.0", true, false ], "OSL-2.1": [ "Open Software License 2.1", true, false ], "OSL-3.0": [ "Open Software License 3.0", true, false ], "OpenSSL": [ "OpenSSL License", false, false ], "PDDL-1.0": [ "ODC Public Domain Dedication & License 1.0", false, false ], "PHP-3.0": [ "PHP License v3.0", true, false ], "PHP-3.01": [ "PHP License v3.01", false, false ], "Plexus": [ "Plexus Classworlds License", false, false ], "PostgreSQL": [ "PostgreSQL License", true, false ], "Python-2.0": [ "Python License 2.0", true, false ], "QPL-1.0": [ "Q Public License 1.0", true, false ], "Qhull": [ "Qhull License", false, false ], "RHeCos-1.1": [ "Red Hat eCos Public License v1.1", false, false ], "RPL-1.1": [ "Reciprocal Public License 1.1", true, false ], "RPL-1.5": [ "Reciprocal Public License 1.5", true, false ], "RPSL-1.0": [ "RealNetworks Public Source License v1.0", true, false ], "RSA-MD": [ "RSA Message-Digest License ", false, false ], "RSCPL": [ "Ricoh Source Code Public License", true, false ], "Rdisc": [ "Rdisc License", false, false ], "Ruby": [ "Ruby License", false, false ], 
"SAX-PD": [ "Sax Public Domain Notice", false, false ], "SCEA": [ "SCEA Shared Source License", false, false ], "SGI-B-1.0": [ "SGI Free Software License B v1.0", false, false ], "SGI-B-1.1": [ "SGI Free Software License B v1.1", false, false ], "SGI-B-2.0": [ "SGI Free Software License B v2.0", false, false ], "SISSL": [ "Sun Industry Standards Source License v1.1", true, false ], "SISSL-1.2": [ "Sun Industry Standards Source License v1.2", false, false ], "SMLNJ": [ "Standard ML of New Jersey License", false, false ], "SMPPL": [ "Secure Messaging Protocol Public License", false, false ], "SNIA": [ "SNIA Public License 1.1", false, false ], "SPL-1.0": [ "Sun Public License v1.0", true, false ], "SWL": [ "Scheme Widget Library (SWL) Software License Agreement", false, false ], "Saxpath": [ "Saxpath License", false, false ], "Sendmail": [ "Sendmail License", false, false ], "SimPL-2.0": [ "Simple Public License 2.0", true, false ], "Sleepycat": [ "Sleepycat License", true, false ], "Spencer-86": [ "Spencer License 86", false, false ], "Spencer-94": [ "Spencer License 94", false, false ], "Spencer-99": [ "Spencer License 99", false, false ], "StandardML-NJ": [ "Standard ML of New Jersey License", false, true ], "SugarCRM-1.1.3": [ "SugarCRM Public License v1.1.3", false, false ], "TCL": [ "TCL/TK License", false, false ], "TCP-wrappers": [ "TCP Wrappers License", false, false ], "TMate": [ "TMate Open Source License", false, false ], "TORQUE-1.1": [ "TORQUE v2.5+ Software License v1.1", false, false ], "TOSL": [ "Trusster Open Source License", false, false ], "UPL-1.0": [ "Universal Permissive License v1.0", true, false ], "Unicode-DFS-2015": [ "Unicode License Agreement - Data Files and Software (2015)", false, false ], "Unicode-DFS-2016": [ "Unicode License Agreement - Data Files and Software (2016)", false, false ], "Unicode-TOU": [ "Unicode Terms of Use", false, false ], "Unlicense": [ "The Unlicense", false, false ], "VOSTROM": [ "VOSTROM Public License for Open 
Source", false, false ], "VSL-1.0": [ "Vovida Software License v1.0", true, false ], "Vim": [ "Vim License", false, false ], "W3C": [ "W3C Software Notice and License (2002-12-31)", true, false ], "W3C-19980720": [ "W3C Software Notice and License (1998-07-20)", false, false ], "W3C-20150513": [ "W3C Software Notice and Document License (2015-05-13)", false, false ], "WTFPL": [ "Do What The F*ck You Want To Public License", false, false ], "Watcom-1.0": [ "Sybase Open Watcom Public License 1.0", true, false ], "Wsuipa": [ "Wsuipa License", false, false ], "X11": [ "X11 License", false, false ], "XFree86-1.1": [ "XFree86 License 1.1", false, false ], "XSkat": [ "XSkat License", false, false ], "Xerox": [ "Xerox License", false, false ], "Xnet": [ "X.Net License", true, false ], "YPL-1.0": [ "Yahoo! Public License v1.0", false, false ], "YPL-1.1": [ "Yahoo! Public License v1.1", false, false ], "ZPL-1.1": [ "Zope Public License 1.1", false, false ], "ZPL-2.0": [ "Zope Public License 2.0", true, false ], "ZPL-2.1": [ "Zope Public License 2.1", false, false ], "Zed": [ "Zed License", false, false ], "Zend-2.0": [ "Zend License v2.0", false, false ], "Zimbra-1.3": [ "Zimbra Public License v1.3", false, false ], "Zimbra-1.4": [ "Zimbra Public License v1.4", false, false ], "Zlib": [ "zlib License", true, false ], "bzip2-1.0.5": [ "bzip2 and libbzip2 License v1.0.5", false, false ], "bzip2-1.0.6": [ "bzip2 and libbzip2 License v1.0.6", false, false ], "curl": [ "curl License", false, false ], "diffmark": [ "diffmark license", false, false ], "dvipdfm": [ "dvipdfm License", false, false ], "eCos-2.0": [ "eCos license version 2.0", false, true ], "eGenix": [ "eGenix.com Public License 1.1.0", false, false ], "gSOAP-1.3b": [ "gSOAP Public License v1.3b", false, false ], "gnuplot": [ "gnuplot License", false, false ], "iMatix": [ "iMatix Standard Function Library Agreement", false, false ], "libtiff": [ "libtiff License", false, false ], "mpich2": [ "mpich2 License", false, 
false ], "psfrag": [ "psfrag License", false, false ], "psutils": [ "psutils License", false, false ], "wxWindows": [ "wxWindows Library License", false, true ], "xinetd": [ "xinetd License", false, false ], "xpp": [ "XPP License", false, false ], "zlib-acknowledgement": [ "zlib/libpng License with Acknowledgement", false, false ] }PK!japoetry/spdx/license.pyfrom collections import namedtuple class License(namedtuple("License", "id name is_osi_approved is_deprecated")): CLASSIFIER_SUPPORTED = { # Not OSI Approved "Aladdin", "CC0-1.0", "CECILL-B", "CECILL-C", "NPL-1.0", "NPL-1.1", # OSI Approved "AFPL", "AFL-1.1", "AFL-1.2", "AFL-2.0", "AFL-2.1", "AFL-3.0", "Apache-1.1", "Apache-2.0", "APSL-1.1", "APSL-1.2", "APSL-2.0", "Artistic-1.0", "Artistic-2.0", "AAL", "AGPL-3.0", "AGPL-3.0-only", "AGPL-3.0-or-later", "BSL-1.0", "BSD-2-Clause", "BSD-3-Clause", "CDDL-1.0", "CECILL-2.1", "CPL-1.0", "EFL-1.0", "EFL-2.0", "EPL-1.0", "EPL-2.0", "EUPL-1.1", "EUPL-1.2", "GPL-2.0", "GPL-2.0+", "GPL-2.0-only", "GPL-2.0-or-later", "GPL-3.0", "GPL-3.0+", "GPL-3.0-only", "GPL-3.0-or-later", "LGPL-2.0", "LGPL-2.0+", "LGPL-2.0-only", "LGPL-2.0-or-later", "LGPL-3.0", "LGPL-3.0+", "LGPL-3.0-only", "LGPL-3.0-or-later", "MIT", "MPL-1.0", "MPL-1.1", "MPL-1.2", "Nokia", "W3C", "ZPL-1.0", "ZPL-2.0", "ZPL-2.1", } CLASSIFIER_NAMES = { # Not OSI Approved "AFPL": "Aladdin Free Public License (AFPL)", "CC0-1.0": "CC0 1.0 Universal (CC0 1.0) Public Domain Dedication", "CECILL-B": "CeCILL-B Free Software License Agreement (CECILL-B)", "CECILL-C": "CeCILL-C Free Software License Agreement (CECILL-C)", "NPL-1.0": "Netscape Public License (NPL)", "NPL-1.1": "Netscape Public License (NPL)", # OSI Approved "AFL-1.1": "Academic Free License (AFL)", "AFL-1.2": "Academic Free License (AFL)", "AFL-2.0": "Academic Free License (AFL)", "AFL-2.1": "Academic Free License (AFL)", "AFL-3.0": "Academic Free License (AFL)", "Apache-1.1": "Apache Software License", "Apache-2.0": "Apache Software License", "APSL-1.1": 
"Apple Public Source License", "APSL-1.2": "Apple Public Source License", "APSL-2.0": "Apple Public Source License", "Artistic-1.0": "Artistic License", "Artistic-2.0": "Artistic License", "AAL": "Attribution Assurance License", "AGPL-3.0": "GNU Affero General Public License v3", "AGPL-3.0-only": "GNU Affero General Public License v3", "AGPL-3.0-or-later": "GNU Affero General Public License v3 or later (AGPLv3+)", "BSL-1.0": "Boost Software License 1.0 (BSL-1.0)", "BSD-2-Clause": "BSD License", "BSD-3-Clause": "BSD License", "CDDL-1.0": "Common Development and Distribution License 1.0 (CDDL-1.0)", "CECILL-2.1": "CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)", "CPL-1.0": "Common Public License", "EPL-1.0": "Eclipse Public License 1.0 (EPL-1.0)", "EFL-1.0": "Eiffel Forum License", "EFL-2.0": "Eiffel Forum License", "EUPL-1.1": "European Union Public Licence 1.1 (EUPL 1.1)", "EUPL-1.2": "European Union Public Licence 1.2 (EUPL 1.2)", "GPL-2.0": "GNU General Public License v2 (GPLv2)", "GPL-2.0-only": "GNU General Public License v2 (GPLv2)", "GPL-2.0+": "GNU General Public License v2 or later (GPLv2+)", "GPL-2.0-or-later": "GNU General Public License v2 or later (GPLv2+)", "GPL-3.0": "GNU General Public License v3 (GPLv3)", "GPL-3.0-only": "GNU General Public License v3 (GPLv3)", "GPL-3.0+": "GNU General Public License v3 or later (GPLv3+)", "GPL-3.0-or-later": "GNU General Public License v3 or later (GPLv3+)", "LGPL-2.0": "GNU Lesser General Public License v2 (LGPLv2)", "LGPL-2.0-only": "GNU Lesser General Public License v2 (LGPLv2)", "LGPL-2.0+": "GNU Lesser General Public License v2 or later (LGPLv2+)", "LGPL-2.0-or-later": "GNU Lesser General Public License v2 or later (LGPLv2+)", "LGPL-3.0": "GNU Lesser General Public License v3 (LGPLv3)", "LGPL-3.0-only": "GNU Lesser General Public License v3 (LGPLv3)", "LGPL-3.0+": "GNU Lesser General Public License v3 or later (LGPLv3+)", "LGPL-3.0-or-later": "GNU Lesser General Public License v3 or later 
(LGPLv3+)", "MPL-1.0": "Mozilla Public License 1.0 (MPL)", "MPL-1.1": "Mozilla Public License 1.1 (MPL 1.1)", "MPL-2.0": "Mozilla Public License 2.0 (MPL 2.0)", "W3C": "W3C License", "ZPL-1.1": "Zope Public License", "ZPL-2.0": "Zope Public License", "ZPL-2.1": "Zope Public License", } @property def classifier(self): parts = ["License"] if self.is_osi_approved: parts.append("OSI Approved") name = self.classifier_name if name is not None: parts.append(name) return " :: ".join(parts) @property def classifier_name(self): if self.id not in self.CLASSIFIER_SUPPORTED: if self.is_osi_approved: return None return "Other/Proprietary License" if self.id in self.CLASSIFIER_NAMES: return self.CLASSIFIER_NAMES[self.id] return self.name PK!XI.poetry/spdx/updater.pyimport json import os try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen class Updater: BASE_URL = "https://raw.githubusercontent.com/spdx/license-list-data/master/json/" def __init__(self, base_url=BASE_URL): self._base_url = base_url def dump(self, file=None): if file is None: file = os.path.join(os.path.dirname(__file__), "data", "licenses.json") licenses_url = self._base_url + "licenses.json" with open(file, "w") as f: f.write( json.dumps(self.get_licenses(licenses_url), indent=2, sort_keys=True) ) def get_licenses(self, url): licenses = {} with urlopen(url) as r: data = json.loads(r.read().decode()) for info in data["licenses"]: licenses[info["licenseId"]] = [ info["name"], info["isOsiApproved"], info["isDeprecatedLicenseId"], ] return licenses PK!kpoetry/toml/__init__.py""" This toml module is a port with changes and fixes of [contoml](https://github.com/jumpscale7/python-consistent-toml). """ from .toml_file import TOMLFile from .prettify.lexer import tokenize as lexer from .prettify.parser import parse_tokens def loads(text): """ Parses TOML text into a dict-like object and returns it. 
""" tokens = tuple(lexer(text, is_top_level=True)) elements = parse_tokens(tokens) return TOMLFile(elements) def load(file_path): """ Parses a TOML file into a dict-like object and returns it. """ with open(file_path) as fd: return loads(fd.read()) def dumps(value): """ Dumps a data structure to TOML source code. The given value must be either a dict of dict values, a dict, or a TOML file constructed by this module. """ if not isinstance(value, TOMLFile): raise RuntimeError( "Can only dump a TOMLFile instance loaded by load() or loads()" ) return value.dumps() def dump(obj, file_path, prettify=False): """ Dumps a data structure to the filesystem as TOML. The given value must be either a dict of dict values, a dict, or a TOML file constructed by this module. """ with open(file_path, "w") as fp: fp.write(dumps(obj)) PK! poetry/toml/array.pyfrom .prettify.errors import InvalidValueError from .freshtable import FreshTable from .prettify import util class ArrayOfTables(list): def __init__(self, toml_file, name, iterable=None): if iterable: list.__init__(self, iterable) self._name = name self._toml_file = toml_file def append(self, value): if isinstance(value, dict): table = FreshTable(parent=self, name=self._name, is_array=True) table._append_to_parent() index = len(self._toml_file[self._name]) - 1 for key_seq, value in util.flatten_nested(value).items(): # self._toml_file._setitem_with_key_seq((self._name, index) + key_seq, value) self._toml_file._array_setitem_with_key_seq( self._name, index, key_seq, value ) # for k, v in value.items(): # table[k] = v else: raise InvalidValueError("Can only append a dict to an array of tables") def __getitem__(self, item): try: return list.__getitem__(self, item) except IndexError: if item == len(self): return FreshTable(parent=self, name=self._name, is_array=True) else: raise def append_fresh_table(self, fresh_table): list.append(self, fresh_table) if self._toml_file: self._toml_file.append_fresh_table(fresh_table) 
PK!gopoetry/toml/cascadedict.pyimport operator from functools import reduce from . import raw class CascadeDict: """ A dict-like object made up of one or more other dict-like objects where querying for an item cascade-gets it from all the internal dicts in order of their listing, and setting an item sets it on the first dict listed. """ def __init__(self, *internal_dicts): assert internal_dicts, "internal_dicts cannot be empty" self._internal_dicts = tuple(internal_dicts) def cascaded_with(self, one_more_dict): """ Returns another instance with one more dict cascaded at the end. """ dicts = self._internal_dicts + one_more_dict return CascadeDict(*dicts) def __getitem__(self, item): for d in self._internal_dicts: try: return d[item] except KeyError: pass raise KeyError def __setitem__(self, key, value): for d in self._internal_dicts[1:]: if key in d: d[key] = value self._internal_dicts[0][key] = value def get(self, item, default=None): try: return self[item] except KeyError: return default def keys(self): return set(reduce(operator.or_, (set(d.keys()) for d in self._internal_dicts))) def items(self): all_items = reduce( operator.add, (list(d.items()) for d in reversed(self._internal_dicts)) ) unique_items = {k: v for k, v in all_items}.items() return tuple(unique_items) def __contains__(self, item): for d in self._internal_dicts: if item in d: return True return False def __len__(self): return len(self.keys()) @property def neutralized(self): return {k: raw.to_raw(v) for k, v in self.items()} @property def primitive_value(self): return self.neutralized def __repr__(self): return repr(self.primitive_value) PK!opoetry/toml/freshtable.pyfrom .prettify.elements.table import TableElement class FreshTable(TableElement): """ A fresh TableElement that appended itself to each of parents when it first gets written to at most once. 
parents is a sequence of objects providing an append_fresh_table(TableElement) method """ def __init__(self, parent, name, is_array=False): TableElement.__init__(self, sub_elements=[]) self._parent = parent self._name = name self._is_array = is_array # As long as this flag is false, setitem() operations will append the table header and this table # to the toml_file's elements self.__appended = False @property def name(self): return self._name @property def is_array(self): return self._is_array def _append_to_parent(self): """ Causes this ephemeral table to be persisted on the TOMLFile. """ if self.__appended: return if self._parent is not None: self._parent.append_fresh_table(self) self.__appended = True def __setitem__(self, key, value): TableElement.__setitem__(self, key, value) self._append_to_parent() PK!poetry/toml/peekableit.pyimport itertools class PeekableIterator: # Returned by peek() when the iterator is exhausted. Truthiness is False. Nothing = tuple() def __init__(self, iter): self._iter = iter def __next__(self): return next(self._iter) def next(self): return self.__next__() def __iter__(self): return self def peek(self): """ Returns PeekableIterator.Nothing when the iterator is exhausted. """ try: v = next(self._iter) self._iter = itertools.chain((v,), self._iter) return v except StopIteration: return PeekableIterator.Nothing PK!M poetry/toml/prettify/__init__.pyfrom ._version import VERSION __version__ = VERSION def prettify(toml_text): """ Prettifies and returns the TOML file content provided. """ from .parser import parse_tokens from .lexer import tokenize from .prettifier import prettify as element_prettify tokens = tokenize(toml_text, is_top_level=True) elements = parse_tokens(tokens) prettified = element_prettify(elements) return "".join(pretty_element.serialized() for pretty_element in prettified) def prettify_from_file(file_path): """ Reads, prettifies and returns the TOML file specified by the file_path. 
""" with open(file_path, "r") as fp: return prettify(fp.read()) PK!s (7 poetry/toml/prettify/_version.pyVERSION = "master" PK!css)poetry/toml/prettify/elements/__init__.py """ TOML file elements (a higher abstraction layer than individual lexical tokens). """ from .traversal import TraversalMixin from .errors import InvalidElementError from .table import TableElement from .tableheader import TableHeaderElement from .common import TYPE_METADATA, TYPE_ATOMIC, TYPE_CONTAINER, TYPE_MARKUP from . import traversal from . import factory PK!& .poetry/toml/prettify/elements/abstracttable.pyfrom .common import ContainerElement from . import traversal class AbstractTable(ContainerElement, traversal.TraversalMixin, dict): """ Common code for handling tables as key-value pairs with metadata elements sprinkled all over. Assumes input sub_elements are correct. """ def __init__(self, sub_elements): ContainerElement.__init__(self, sub_elements) self._fallback = None def _enumerate_items(self): """ Returns ((key_index, key_element), (value_index, value_element)) for all the element key-value pairs. """ non_metadata = self._enumerate_non_metadata_sub_elements() while True: yield next(non_metadata), next(non_metadata) def items(self): for (key_i, key), (value_i, value) in self._enumerate_items(): yield key.value, value.value if self._fallback: for key, value in self._fallback.items(): yield key, value def keys(self): return tuple(key for (key, _) in self.items()) def values(self): return tuple(value for (_, value) in self.items()) def __len__(self): return len(tuple(self._enumerate_items())) def __contains__(self, item): return item in self.keys() def _find_key_and_value(self, key): """ Returns (key_i, value_i) corresponding to the given key value. Raises KeyError if no matching key found. 
""" for (key_i, key_element), (value_i, value_element) in self._enumerate_items(): if key_element.value == key: return key_i, value_i raise KeyError def __getitem__(self, item): for key, value in self.items(): if key == item: return value raise KeyError def get(self, key, default=None): try: return self[key] except KeyError: return default def set_fallback(self, fallback): """ Sets a fallback dict-like instance to be used to look up values after they are not found in this instance. """ self._fallback = fallback @property def primitive_value(self): """ Returns a primitive Python value without any formatting or markup metadata. """ return { key: value.primitive_value if hasattr(value, "primitive_value") else value for key, value in self.items() } PK!:10&poetry/toml/prettify/elements/array.pyfrom . import factory, traversal from .common import Element, ContainerElement from .factory import create_element from .metadata import NewlineElement from .errors import InvalidElementError class ArrayElement(ContainerElement, traversal.TraversalMixin, list): """ A sequence-like container element containing other atomic elements or other containers. Implements list-like interface. Assumes input sub_elements are correct for an array element. Raises an InvalidElementError if contains heterogeneous values. """ def __init__(self, sub_elements): super(ArrayElement, self).__init__(sub_elements) self._check_homogeneity() def _check_homogeneity(self): if len(set(type(v) for v in self.primitive_value)) > 1: raise InvalidElementError("Array should be homogeneous") def __len__(self): return len(tuple(self._enumerate_non_metadata_sub_elements())) def __getitem__(self, i): """ Returns the ith entry, which can be a primitive value, a seq-lie, or a dict-like object. 
""" return self._find_value(i)[1].value def __setitem__(self, i, value): value_i, _ = self._find_value(i) new_element = ( value if isinstance(value, Element) else factory.create_element(value) ) self._sub_elements = ( self.sub_elements[:value_i] + [new_element] + self.sub_elements[value_i + 1 :] ) @property def value(self): return self # self is a sequence-like value @property def primitive_value(self): """ Returns a primitive Python value without any formatting or markup metadata. """ return list( self[i].primitive_value if hasattr(self[i], "primitive_value") else self[i] for i in range(len(self)) ) def __str__(self): return "{}".format(self.primitive_value) def __repr__(self): return "Array{}".format(str(self)) def append(self, v): new_entry = [create_element(v)] if self: # If not empty, we need a comma and whitespace prefix! new_entry = [ factory.create_operator_element(","), factory.create_whitespace_element(), ] + new_entry insertion_index = self._find_closing_square_bracket() self._sub_elements = ( self._sub_elements[:insertion_index] + new_entry + self._sub_elements[insertion_index:] ) def _find_value(self, i): """ Returns (value_index, value) of ith value in this sequence. Raises IndexError if not found. """ return tuple(self._enumerate_non_metadata_sub_elements())[i] def __delitem__(self, i): value_i, value = self._find_value(i) begin, end = value_i, value_i + 1 # Rules: # 1. begin should be index to the preceding comma to the value # 2. end should be index to the following comma, or the closing bracket # 3. 
If no preceding comma found but following comma found then end should be the index of the following value preceding_comma = self._find_preceding_comma(value_i) found_preceding_comma = preceding_comma >= 0 if found_preceding_comma: begin = preceding_comma following_comma = self._find_following_comma(value_i) if following_comma >= 0: if not found_preceding_comma: end = self._find_following_non_metadata(following_comma) else: end = following_comma else: end = self._find_following_closing_square_bracket(0) self._sub_elements = self.sub_elements[:begin] + self._sub_elements[end:] @property def is_multiline(self): return any(isinstance(e, (NewlineElement)) for e in self.elements) def turn_into_multiline(self): """ Turns this array into a multi-line array with each element lying on its own line. """ if self.is_multiline: return i = self._find_following_comma(-1) def next_entry_i(): return self._find_following_non_metadata(i) def next_newline_i(): return self._find_following_newline(i) def next_closing_bracket_i(): return self._find_following_closing_square_bracket(i) def next_comma_i(): return self._find_following_comma(i) while i < len(self.elements) - 1: if next_newline_i() < next_entry_i(): self.elements.insert(i + 1, factory.create_newline_element()) if float("-inf") < next_comma_i() < next_closing_bracket_i(): i = next_comma_i() else: i = next_closing_bracket_i() PK!++'poetry/toml/prettify/elements/atomic.pyfrom ..tokens import py2toml, toml2py from ..util import is_dict_like, is_sequence_like from . import common from .errors import InvalidElementError class AtomicElement(common.TokenElement): """ An element containing a sequence of tokens representing a single atomic value that can be updated in place. Raises: InvalidElementError: when passed an invalid sequence of tokens. 
""" def __init__(self, _tokens): common.TokenElement.__init__(self, _tokens, common.TYPE_ATOMIC) def _validate_tokens(self, _tokens): if len([token for token in _tokens if not token.type.is_metadata]) != 1: raise InvalidElementError( "Tokens making up an AtomicElement must contain only one non-metadata token" ) def serialized(self): return "".join(token.source_substring for token in self.tokens) def _value_token_index(self): """ Finds the token where the value is stored. """ # TODO: memoize this value for i, token in enumerate(self.tokens): if not token.type.is_metadata: return i raise RuntimeError("could not find a value token") @property def value(self): """ Returns a Python value contained in this atomic element. """ return toml2py.deserialize(self._tokens[self._value_token_index()]) @property def primitive_value(self): return self.value def set(self, value): """ Sets the contained value to the given one. """ assert (not is_sequence_like(value)) and ( not is_dict_like(value) ), "the value must be an atomic primitive" token_index = self._value_token_index() self._tokens[token_index] = py2toml.create_primitive_token(value) PK!E]  'poetry/toml/prettify/elements/common.pyfrom abc import abstractmethod TYPE_METADATA = "element-metadata" TYPE_ATOMIC = "element-atomic" TYPE_CONTAINER = "element-container" TYPE_MARKUP = "element-markup" class Element: """ An Element: - is one or more Token instances, or one or more other Element instances. Not both. - knows how to serialize its value back to valid TOML code. A non-metadata Element is an Element that: - knows how to deserialize its content into usable Python primitive, seq-like, or dict-like value. - knows how to update its content from a Python primitive, seq-like, or dict-like value while maintaining its formatting. """ def __init__(self, _type): self._type = _type @property def type(self): return self._type @abstractmethod def serialized(self): """ TOML serialization of this element as str. 
""" raise NotImplementedError class TokenElement(Element): """ An Element made up of tokens """ def __init__(self, _tokens, _type): Element.__init__(self, _type) self._validate_tokens(_tokens) self._tokens = list(_tokens) @property def tokens(self): return self._tokens @property def first_token(self): return self._tokens[0] @abstractmethod def _validate_tokens(self, _tokens): raise NotImplementedError def serialized(self): return "".join(token.source_substring for token in self._tokens) def __repr__(self): return repr(self.tokens) @property def primitive_value(self): """ Returns a primitive Python value without any formatting or markup metadata. """ raise NotImplementedError class ContainerElement(Element): """ An Element containing exclusively other elements. """ def __init__(self, sub_elements): Element.__init__(self, TYPE_CONTAINER) self._sub_elements = list(sub_elements) @property def sub_elements(self): return self._sub_elements @property def elements(self): return self.sub_elements def serialized(self): return "".join(element.serialized() for element in self.sub_elements) def __eq__(self, other): return self.primitive_value == other def __repr__(self): return repr(self.primitive_value) @property def primitive_value(self): """ Returns a primitive Python value without any formatting or markup metadata. """ raise NotImplementedError PK!o敩``'poetry/toml/prettify/elements/errors.pyclass InvalidElementError(Exception): """ Raised by Element factories when the given sequence of tokens or sub-elements are invalid for the specific type of Element being created. """ def __init__(self, message): self.message = message def __repr__(self): return "InvalidElementError: {}".format(self.message) PK!t::(poetry/toml/prettify/elements/factory.pyimport datetime import six from .. 
import tokens from ..tokens import py2toml from ..util import join_with from .atomic import AtomicElement from .metadata import PunctuationElement, WhitespaceElement, NewlineElement from .tableheader import TableHeaderElement def create_element(value, multiline_strings_allowed=True): """ Creates and returns the appropriate elements.Element instance from the given Python primitive, sequence-like, or dict-like value. """ from .array import ArrayElement if ( isinstance( value, (int, float, bool, datetime.datetime, datetime.date) + six.string_types, ) or value is None ): primitive_token = py2toml.create_primitive_token( value, multiline_strings_allowed=multiline_strings_allowed ) return AtomicElement((primitive_token,)) elif isinstance(value, (list, tuple)): preamble = [create_operator_element("[")] postable = [create_operator_element("]")] stuffing_elements = [create_element(v) for v in value] spaced_stuffing = join_with( stuffing_elements, separator=[create_operator_element(","), create_whitespace_element()], ) return ArrayElement(preamble + spaced_stuffing + postable) elif isinstance(value, dict): return create_inline_table( value, multiline_table=False, multiline_strings_allowed=multiline_strings_allowed, ) else: raise RuntimeError( "Value type unaccounted for: {} of type {}".format(value, type(value)) ) def create_inline_table( from_dict, multiline_table=False, multiline_strings_allowed=True ): """ Creates an InlineTable element from the given dict instance. 
""" from .inlinetable import InlineTableElement preamble = [create_operator_element("{")] postable = [create_operator_element("}")] stuffing_elements = ( ( create_string_element(k, bare_allowed=True), create_whitespace_element(), create_operator_element("="), create_whitespace_element(), create_element(v, multiline_strings_allowed=False), ) for (k, v) in from_dict.items() ) pair_separator = [ create_operator_element(","), create_newline_element() if multiline_table else create_whitespace_element(), ] spaced_elements = join_with(stuffing_elements, separator=pair_separator) return InlineTableElement(preamble + spaced_elements + postable) def create_string_element(value, bare_allowed=False): """ Creates and returns an AtomicElement wrapping a string value. """ return AtomicElement((py2toml.create_string_token(value, bare_allowed),)) def create_operator_element(operator): """ Creates a PunctuationElement instance containing an operator token of the specified type. The operator should be a TOML source str. """ operator_type_map = { ",": tokens.TYPE_OP_COMMA, "=": tokens.TYPE_OP_ASSIGNMENT, "[": tokens.TYPE_OP_SQUARE_LEFT_BRACKET, "]": tokens.TYPE_OP_SQUARE_RIGHT_BRACKET, "[[": tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET, "]]": tokens.TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET, "{": tokens.TYPE_OP_CURLY_LEFT_BRACKET, "}": tokens.TYPE_OP_CURLY_RIGHT_BRACKET, } ts = (tokens.Token(operator_type_map[operator], operator),) return PunctuationElement(ts) def create_newline_element(): """ Creates and returns a single NewlineElement. """ ts = (tokens.Token(tokens.TYPE_NEWLINE, "\n"),) return NewlineElement(ts) def create_whitespace_element(length=1, char=" "): """ Creates and returns a WhitespaceElement containing spaces. 
""" ts = (tokens.Token(tokens.TYPE_WHITESPACE, char),) * length return WhitespaceElement(ts) def create_table_header_element(names): name_elements = [] if isinstance(names, six.string_types): name_elements = [py2toml.create_string_token(names, bare_string_allowed=True)] else: for (i, name) in enumerate(names): name_elements.append( py2toml.create_string_token(name, bare_string_allowed=True) ) if i < (len(names) - 1): name_elements.append(py2toml.operator_token(tokens.TYPE_OPT_DOT)) return TableHeaderElement( [py2toml.operator_token(tokens.TYPE_OP_SQUARE_LEFT_BRACKET)] + name_elements + [ py2toml.operator_token(tokens.TYPE_OP_SQUARE_RIGHT_BRACKET), py2toml.operator_token(tokens.TYPE_NEWLINE), ] ) def create_array_of_tables_header_element(name): return TableHeaderElement( ( py2toml.operator_token(tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET), py2toml.create_string_token(name, bare_string_allowed=True), py2toml.operator_token(tokens.TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET), py2toml.operator_token(tokens.TYPE_NEWLINE), ) ) def create_table(dict_value): """ Creates a TableElement out of a dict instance. """ from .table import TableElement if not isinstance(dict_value, dict): raise ValueError("input must be a dict instance.") table_element = TableElement([create_newline_element()]) for k, v in dict_value.items(): table_element[k] = create_element(v) return table_element def create_multiline_string(text, maximum_line_length): return AtomicElement( _tokens=[py2toml.create_multiline_string(text, maximum_line_length)] ) PK!SX ,poetry/toml/prettify/elements/inlinetable.pyfrom . import factory, abstracttable from .common import Element class InlineTableElement(abstracttable.AbstractTable): """ An Element containing key-value pairs, representing an inline table. Implements dict-like interface. Assumes input sub_elements are correct for an inline table element. 
""" def __init__(self, sub_elements): abstracttable.AbstractTable.__init__(self, sub_elements) def __setitem__(self, key, value): new_element = ( value if isinstance(value, Element) else factory.create_element(value) ) try: key_i, value_i = self._find_key_and_value(key) # Found, then replace the value element with a new one self._sub_elements = ( self.sub_elements[:value_i] + [new_element] + self.sub_elements[value_i + 1 :] ) except KeyError: # Key does not exist, adding anew! new_entry = [ factory.create_string_element(key, bare_allowed=True), factory.create_whitespace_element(), factory.create_operator_element("="), factory.create_whitespace_element(), new_element, ] if self: # If not empty new_entry = [ factory.create_operator_element(","), factory.create_whitespace_element(), ] + new_entry insertion_index = self._find_closing_curly_bracket() self._sub_elements = ( self.sub_elements[:insertion_index] + new_entry + self.sub_elements[insertion_index:] ) def __delitem__(self, key): key_i, value_i = self._find_key_and_value(key) begin, end = key_i, value_i + 1 # Rules: # 1. begin should be index to the preceding comma to the key # 2. end should be index to the following comma, or the closing bracket # 3. 
If no preceding comma found but following comma found then end should be the index of the following key preceding_comma = self._find_preceding_comma(begin) found_preceding_comma = preceding_comma >= 0 if found_preceding_comma: begin = preceding_comma following_comma = self._find_following_comma(value_i) if following_comma >= 0: if not found_preceding_comma: end = self._find_following_non_metadata(following_comma) else: end = following_comma else: end = self._find_closing_curly_bracket() self._sub_elements = self.sub_elements[:begin] + self.sub_elements[end:] def multiline_equivalent(self): return factory.create_inline_table( self.primitive_value, multiline_table=True, multiline_strings_allowed=True ) @property def value(self): return self # self is a dict-like value that is perfectly usable PK!kD D )poetry/toml/prettify/elements/metadata.pyfrom .. import tokens from . import common from .errors import InvalidElementError class WhitespaceElement(common.TokenElement): """ An element that contains tokens of whitespace """ def __init__(self, _tokens): common.TokenElement.__init__(self, _tokens, common.TYPE_METADATA) def _validate_tokens(self, _tokens): for token in _tokens: if token.type != tokens.TYPE_WHITESPACE: raise InvalidElementError( "Tokens making up a WhitespaceElement must all be whitespace" ) @property def length(self): """ The whitespace length of this element """ return len(self.tokens) class NewlineElement(common.TokenElement): """ An element containing newline tokens Raises: InvalidElementError: when passed an invalid sequence of tokens. """ def __init__(self, _tokens): common.TokenElement.__init__(self, _tokens, common.TYPE_METADATA) def _validate_tokens(self, _tokens): for token in _tokens: if token.type != tokens.TYPE_NEWLINE: raise InvalidElementError( "Tokens making a NewlineElement must all be newlines" ) class CommentElement(common.TokenElement): """ An element containing a single comment token followed by a newline. 
Raises: InvalidElementError: when passed an invalid sequence of tokens. """ def __init__(self, _tokens): common.TokenElement.__init__(self, _tokens, common.TYPE_METADATA) def _validate_tokens(self, _tokens): if ( len(_tokens) != 2 or _tokens[0].type != tokens.TYPE_COMMENT or _tokens[1].type != tokens.TYPE_NEWLINE ): raise InvalidElementError( "CommentElement needs one comment token followed by one newline token" ) class PunctuationElement(common.TokenElement): """ An element containing a single punctuation token. Raises: InvalidElementError: when passed an invalid sequence of tokens. """ def __init__(self, _tokens): common.TokenElement.__init__(self, _tokens, common.TYPE_METADATA) @property def token(self): """ Returns the token contained in this Element. """ return self.tokens[0] def _validate_tokens(self, _tokens): if not _tokens or not tokens.is_operator(_tokens[0]): raise InvalidElementError( "PunctuationElement must be made of only a single operator token" ) PK!|&poetry/toml/prettify/elements/table.pyfrom . import abstracttable, common, factory from .errors import InvalidElementError from .common import Element from .metadata import CommentElement, NewlineElement, WhitespaceElement class TableElement(abstracttable.AbstractTable): """ An Element containing an unnamed top-level table. Implements dict-like interface. Assumes input sub_elements are correct. Raises InvalidElementError on duplicate keys. 
""" def __init__(self, sub_elements): abstracttable.AbstractTable.__init__(self, sub_elements) self._check_for_duplicate_keys() def _check_for_duplicate_keys(self): if len(set(self.keys())) < len(self.keys()): raise InvalidElementError("Duplicate keys found") def __setitem__(self, key, value): if key in self: self._update(key, value) else: self._insert(key, value) def _update(self, key, value): _, value_i = self._find_key_and_value(key) self._sub_elements[value_i] = ( value if isinstance(value, Element) else factory.create_element(value) ) def _find_insertion_index(self): """ Returns the self.sub_elements index in which new entries should be inserted. """ non_metadata_elements = tuple(self._enumerate_non_metadata_sub_elements()) if not non_metadata_elements: return 0 last_entry_i = non_metadata_elements[-1][0] following_newline_i = self._find_following_line_terminator(last_entry_i) return following_newline_i + 1 def _detect_indentation_size(self): """ Detects the level of indentation used in this table. 
""" def lines(): # Returns a sequence of sequences of elements belonging to each line start = 0 for i, element in enumerate(self.elements): if isinstance(element, (CommentElement, NewlineElement)): yield self.elements[start : i + 1] start = i + 1 def indentation(line): # Counts the number of whitespace tokens at the beginning of this line try: first_non_whitespace_i = next( i for (i, e) in enumerate(line) if not isinstance(e, WhitespaceElement) ) return sum(space.length for space in line[:first_non_whitespace_i]) except StopIteration: return 0 def is_empty_line(line): return all(e.type == common.TYPE_METADATA for e in line) try: return min( indentation(line) for line in lines() if len(line) > 1 and not is_empty_line(line) ) except ValueError: # Raised by ValueError when no matching lines found return 0 def _insert(self, key, value): value_element = ( value if isinstance(value, Element) else factory.create_element(value) ) indentation_size = self._detect_indentation_size() indentation = ( [factory.create_whitespace_element(self._detect_indentation_size())] if indentation_size else [] ) inserted_elements = indentation + [ factory.create_string_element(key, bare_allowed=True), factory.create_whitespace_element(), factory.create_operator_element("="), factory.create_whitespace_element(), value_element, factory.create_newline_element(), ] insertion_index = self._find_insertion_index() self._sub_elements = ( self.sub_elements[:insertion_index] + inserted_elements + self.sub_elements[insertion_index:] ) def __delitem__(self, key): begin, _ = self._find_key_and_value(key) preceding_newline = self._find_preceding_newline(begin) if preceding_newline >= 0: begin = preceding_newline end = self._find_following_newline(begin) if end < 0: end = len(tuple(self._sub_elements)) self._sub_elements = self.sub_elements[:begin] + self.sub_elements[end:] @property def value(self): return self def __eq__(self, other): return self.primitive_value == other def __iter__(self): return 
iter(self.keys()) def __str__(self): return str(self.primitive_value) PK!T,poetry/toml/prettify/elements/tableheader.pyfrom .. import tokens from ..tokens import toml2py from . import common from .common import TokenElement from .errors import InvalidElementError _opening_bracket_types = ( tokens.TYPE_OP_SQUARE_LEFT_BRACKET, tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET, ) _closing_bracket_types = ( tokens.TYPE_OP_SQUARE_RIGHT_BRACKET, tokens.TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET, ) _name_types = (tokens.TYPE_BARE_STRING, tokens.TYPE_LITERAL_STRING, tokens.TYPE_STRING) class TableHeaderElement(TokenElement): """ An element containing opening and closing single and double square brackets, strings and dots and ending with a newline. Raises InvalidElementError. """ def __init__(self, _tokens): TokenElement.__init__(self, _tokens, common.TYPE_MARKUP) self._names = tuple( toml2py.deserialize(token) for token in self._tokens if token.type in _name_types ) @property def is_array_of_tables(self): opening_bracket = next( token for i, token in enumerate(self._tokens) if token.type in _opening_bracket_types ) return opening_bracket.type == tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET @property def names(self): """ Returns a sequence of string names making up this table header name. """ return self._names def has_name_prefix(self, names): """ Returns True if the header names is prefixed by the given sequence of names. """ for i, name in enumerate(names): if self.names[i] != name: return False return True def serialized(self): return "".join(token.source_substring for token in self._tokens) def is_named(self, names): """ Returns True if the given name sequence matches the full name of this header. 
""" return tuple(names) == self.names def _validate_tokens(self, _tokens): opening_bracket_i = next( ( i for i, token in enumerate(_tokens) if token.type in _opening_bracket_types ), float("-inf"), ) if opening_bracket_i < 0: raise InvalidElementError("Expected an opening bracket") _tokens = _tokens[opening_bracket_i + 1 :] first_name_i = next( (i for i, token in enumerate(_tokens) if token.type in _name_types), float("-inf"), ) if first_name_i < 0: raise InvalidElementError("Expected a table header name") _tokens = _tokens[first_name_i + 1 :] while True: next_dot_i = next( ( i for i, token in enumerate(_tokens) if token.type == tokens.TYPE_OPT_DOT ), float("-inf"), ) if next_dot_i < 0: break _tokens = _tokens[next_dot_i + 1 :] next_name_i = next( (i for i, token in enumerate(_tokens) if token.type in _name_types), float("-inf"), ) if next_name_i < 0: raise InvalidElementError("Expected a name after the dot") _tokens = _tokens[next_name_i + 1 :] closing_bracket_i = next( ( i for i, token in enumerate(_tokens) if token.type in _closing_bracket_types ), float("-inf"), ) if closing_bracket_i < 0: raise InvalidElementError("Expected a closing bracket") if _tokens[-1].type != tokens.TYPE_NEWLINE: raise InvalidElementError("Must end with a newline") PK!3poetry/toml/prettify/elements/traversal/__init__.pyfrom ...tokens import TYPE_OP_COMMA from ...tokens import TYPE_OP_CURLY_RIGHT_BRACKET from ..common import TYPE_METADATA from ..metadata import PunctuationElement, NewlineElement from . import predicates class TraversalMixin: """ A mix-in that provides convenient sub-element traversal to any class with an `elements` member that is a sequence of Element instances """ def __find_following_element(self, index, predicate): """ Finds and returns the index of element in self.elements that evaluates the given predicate to True and whose index is higher than the given index, or returns -Infinity on failure. 
""" return find_following(self.elements, predicate, index) def __find_preceding_element(self, index, predicate): """ Finds and returns the index of the element in self.elements that evaluates the given predicate to True and whose index is lower than the given index. """ i = find_previous(self.elements, predicate, index) if i == float("inf"): return float("-inf") return i def __must_find_following_element(self, predicate): """ Finds and returns the index to the element in self.elements that evaluatest the predicate to True, or raises an error. """ i = self.__find_following_element(-1, predicate) if i < 0: raise RuntimeError("Could not find non-optional element") return i def _enumerate_non_metadata_sub_elements(self): """ Returns a sequence of of (index, sub_element) of the non-metadata sub-elements. """ return ( (i, element) for i, element in enumerate(self.elements) if element.type != TYPE_METADATA ) def _find_preceding_comma(self, index): """ Returns the index of the preceding comma element to the given index, or -Infinity. """ return self.__find_preceding_element(index, predicates.op_comma) def _find_following_comma(self, index): """ Returns the index of the following comma element after the given index, or -Infinity. """ def predicate(element): return ( isinstance(element, PunctuationElement) and element.token.type == TYPE_OP_COMMA ) return self.__find_following_element(index, predicate) def _find_following_newline(self, index): """ Returns the index of the following newline element after the given index, or -Infinity. """ return self.__find_following_element( index, lambda e: isinstance(e, NewlineElement) ) def _find_following_comment(self, index): """ Returns the index of the following comment element after the given index, or -Infinity. """ return self.__find_following_element(index, predicates.comment) def _find_following_line_terminator(self, index): """ Returns the index of the following comment or newline element after the given index, or -Infinity. 
""" following_comment = self._find_following_comment(index) following_newline = self._find_following_newline(index) if following_comment == float("-inf"): return following_newline if following_newline == float("inf"): return following_comment if following_newline < following_comment: return following_newline else: return following_comment def _find_preceding_newline(self, index): """ Returns the index of the preceding newline element to the given index, or -Infinity. """ return self.__find_preceding_element(index, predicates.newline) def _find_following_non_metadata(self, index): """ Returns the index to the following non-metadata element after the given index, or -Infinity. """ return self.__find_following_element(index, predicates.non_metadata) def _find_closing_square_bracket(self): """ Returns the index to the closing square bracket, or raises an Error. """ return self.__must_find_following_element(predicates.closing_square_bracket) def _find_following_opening_square_bracket(self, index): """ Returns the index to the opening square bracket, or -Infinity. """ return self.__find_following_element(index, predicates.opening_square_bracket) def _find_following_closing_square_bracket(self, index): """ Returns the index to the closing square bracket, or -Infinity. """ return self.__find_following_element(index, predicates.closing_square_bracket) def _find_following_table(self, index): """ Returns the index to the next TableElement after the specified index, or -Infinity. """ return self.__find_following_element(index, predicates.table) def _find_preceding_table(self, index): """ Returns the index to the preceding TableElement to the specified index, or -Infinity. """ return self.__find_preceding_element(index, predicates.table) def _find_closing_curly_bracket(self): """ Returns the index to the closing curly bracket, or raises an Error. 
def find_following(element_seq, predicate, index=None):
    """
    Returns the index of the first element after the given index that
    satisfies the predicate, or -Infinity when no such element exists.

    A None or negative index means "search from the very beginning".
    """
    if isinstance(index, (int, float)) and index < 0:
        index = None

    indexed = tuple(enumerate(element_seq))
    begin = 0 if index is None else index + 1
    for i, element in indexed[begin:]:
        if predicate(element):
            return i

    return float("-inf")


def find_previous(element_seq, predicate, index=None):
    """
    Returns the index of the last element before the given index that
    satisfies the predicate, or Infinity when no such element exists.

    A None or out-of-range index means "search back from the very end".
    """
    if isinstance(index, (int, float)) and index >= len(element_seq):
        index = None

    indexed = tuple(enumerate(element_seq))
    for i, element in reversed(indexed[:index]):
        if predicate(element):
            return i

    return float("inf")
def atomic(e):
    """True if the element is an AtomicElement."""
    return isinstance(e, AtomicElement)


def op_assignment(e):
    """True if the element is the '=' punctuation."""
    return isinstance(e, PunctuationElement) and e.token.type == TYPE_OP_ASSIGNMENT


def op_comma(e):
    """True if the element is the ',' punctuation."""
    return isinstance(e, PunctuationElement) and e.token.type == TYPE_OP_COMMA


def comment(e):
    """True if the element is a CommentElement."""
    return isinstance(e, CommentElement)


def newline(e):
    """True if the element is a NewlineElement."""
    return isinstance(e, NewlineElement)


def non_metadata(e):
    """True if the element is not metadata (i.e. carries actual values)."""
    return e.type != common.TYPE_METADATA


def closing_square_bracket(e):
    """True if the element is the ']' punctuation."""
    return (
        isinstance(e, PunctuationElement)
        and e.token.type == TYPE_OP_SQUARE_RIGHT_BRACKET
    )


def opening_square_bracket(e):
    """True if the element is the '[' punctuation."""
    return (
        isinstance(e, PunctuationElement)
        and e.token.type == TYPE_OP_SQUARE_LEFT_BRACKET
    )


def table(e):
    """True if the element is a TableElement (imported lazily to avoid a cycle)."""
    from ..table import TableElement

    return isinstance(e, TableElement)


def table_header(e):
    """True if the element is a TableHeaderElement (imported lazily to avoid a cycle)."""
    from ..tableheader import TableHeaderElement

    return isinstance(e, TableHeaderElement)


def whitespace(e):
    """True if the element is a WhitespaceElement."""
    return isinstance(e, WhitespaceElement)
import tokens from ..errors import TOMLError TokenSpec = namedtuple("TokenSpec", ("type", "re")) # Specs of all the valid tokens _LEXICAL_SPECS = ( TokenSpec(tokens.TYPE_COMMENT, re.compile(r"^(#.*)\n")), TokenSpec( tokens.TYPE_STRING, re.compile(r'^("(([^"]|\\")+?[^\\]|([^"]|\\")|)")') ), # Single line only TokenSpec(tokens.TYPE_MULTILINE_STRING, re.compile(r'^(""".*?""")', re.DOTALL)), TokenSpec(tokens.TYPE_LITERAL_STRING, re.compile(r"^('.*?')")), TokenSpec( tokens.TYPE_MULTILINE_LITERAL_STRING, re.compile(r"^('''.*?''')", re.DOTALL) ), TokenSpec(tokens.TYPE_BARE_STRING, re.compile(r"^([A-Za-z0-9_-]+)")), TokenSpec( tokens.TYPE_DATE, re.compile( r"^([0-9]{4}-[0-9]{2}-[0-9]{2}(T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]*)?)?(([zZ])|((\+|-)[0-9]{2}:[0-9]{2}))?)" ), ), TokenSpec(tokens.TYPE_WHITESPACE, re.compile(r"^( |\t)", re.DOTALL)), TokenSpec(tokens.TYPE_INTEGER, re.compile(r"^(((\+|-)[0-9_]+)|([0-9][0-9_]*))")), TokenSpec( tokens.TYPE_FLOAT, re.compile( r"^((((\+|-)[0-9_]+)|([1-9][0-9_]*))(\.[0-9_]+)?([eE](\+|-)?[0-9_]+)?)" ), ), TokenSpec(tokens.TYPE_BOOLEAN, re.compile(r"^(true|false)")), TokenSpec(tokens.TYPE_OP_SQUARE_LEFT_BRACKET, re.compile(r"^(\[)")), TokenSpec(tokens.TYPE_OP_SQUARE_RIGHT_BRACKET, re.compile(r"^(\])")), TokenSpec(tokens.TYPE_OP_CURLY_LEFT_BRACKET, re.compile(r"^(\{)")), TokenSpec(tokens.TYPE_OP_CURLY_RIGHT_BRACKET, re.compile(r"^(\})")), TokenSpec(tokens.TYPE_OP_ASSIGNMENT, re.compile(r"^(=)")), TokenSpec(tokens.TYPE_OP_COMMA, re.compile(r"^(,)")), TokenSpec(tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET, re.compile(r"^(\[\[)")), TokenSpec(tokens.TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET, re.compile(r"^(\]\])")), TokenSpec(tokens.TYPE_OPT_DOT, re.compile(r"^(\.)")), TokenSpec(tokens.TYPE_NEWLINE, re.compile("^(\n|\r\n)")), ) def _next_token_candidates(source): matches = [] for token_spec in _LEXICAL_SPECS: match = token_spec.re.search(source) if match: matches.append(tokens.Token(token_spec.type, match.group(1))) return matches def 
_choose_from_next_token_candidates(candidates): if len(candidates) == 1: return candidates[0] elif len(candidates) > 1: # Return the maximal-munch with ties broken by natural order of token type. maximal_munch_length = max(len(token.source_substring) for token in candidates) maximal_munches = [ token for token in candidates if len(token.source_substring) == maximal_munch_length ] return sorted(maximal_munches)[0] # Return the first in sorting by priority def _munch_a_token(source): """ Munches a single Token instance if it could recognize one at the beginning of the given source text, or None if no token type could be recognized. """ candidates = _next_token_candidates(source) return _choose_from_next_token_candidates(candidates) class LexerError(TOMLError): def __init__(self, message): self._message = message def __repr__(self): return self._message def __str__(self): return self._message def tokenize(source, is_top_level=False): """ Tokenizes the input TOML source into a stream of tokens. If is_top_level is set to True, will make sure that the input source has a trailing newline character before it is tokenized. Raises a LexerError when it fails recognize another token while not at the end of the source. """ # Newlines are going to be normalized to UNIX newlines. 
source = source.replace("\r\n", "\n") if is_top_level and source and source[-1] != "\n": source += "\n" next_row = 1 next_col = 1 next_index = 0 while next_index < len(source): new_token = _munch_a_token(source[next_index:]) if not new_token: raise LexerError( "failed to read the next token at ({}, {}): {}".format( next_row, next_col, source[next_index:] ) ) # Set the col and row on the new token new_token = tokens.Token( new_token.type, new_token.source_substring, next_col, next_row ) # Advance the index, row and col count next_index += len(new_token.source_substring) for c in new_token.source_substring: if c == "\n": next_row += 1 next_col = 1 else: next_col += 1 yield new_token PK!P]'''poetry/toml/prettify/parser/__init__.py """ A parser for TOML tokens into TOML elements. """ from .elementsanitizer import sanitize from .errors import ParsingError from .parser import toml_file_elements from .tokenstream import TokenStream def parse_tokens(tokens): """ Parses the given token sequence into a sequence of top-level TOML elements. Raises ParserError on invalid TOML input. """ return _parse_token_stream(TokenStream(tokens)) def _parse_token_stream(token_stream): """ Parses the given token_stream into a sequence of top-level TOML elements. Raises ParserError on invalid input TOML. """ elements, pending = toml_file_elements(token_stream) if not pending.at_end: raise ParsingError("Failed to parse line {}".format(pending.head.row)) return sanitize(elements) PK!OpPXX/poetry/toml/prettify/parser/elementsanitizer.pyfrom ..elements import TYPE_METADATA from ..elements.table import TableElement from ..elements.tableheader import TableHeaderElement from ..errors import InvalidTOMLFileError from ..util import PeekableIterator def sanitize(_elements): """ Finds TableHeader elements that are not followed by TableBody elements and inserts empty TableElement right after those. 
class ParsingError(TOMLError):
    """
    Raised when the token stream cannot be parsed as valid TOML.

    Carries an optional human-readable message and, when available, the
    offending token (used for source position reporting).
    """

    def __init__(self, message="", token=None):
        self.message = message
        self.token = token

    def __repr__(self):
        # Include the source position only when a token was supplied.
        if not (self.message and self.token):
            return self.message
        return "{} at row {} and col {}".format(
            self.message, self.token.row, self.token.col
        )

    def __str__(self):
        return repr(self)
Grammar: -------- Newline -> NEWLINE Comment -> COMMENT Newline LineTerminator -> Comment | Newline Space -> WHITESPACE Space | WHITESPACE | EMPTY TableHeader -> Space [ Space TableHeaderName Space ] Space LineTerminator | Space [[ Space TableHeaderName Space ]] Space LineTerminator TableHeaderName -> STRING Space '.' Space TableHeaderName | STRING Atomic -> STRING | INTEGER | FLOAT | DATE | BOOLEAN Array -> '[' Space ArrayInternal Space ']' | '[' Space ArrayInternal Space LineTerminator Space ']' ArrayInternal -> LineTerminator Space ArrayInternal | Value Space ',' Space LineTerminator Space ArrayInternal | Value Space ',' Space ArrayInternal | LineTerminator | Value | EMPTY InlineTable -> '{' Space InlineTableInternal Space '}' InlineTableKeyValuePair = STRING Space '=' Space Value InlineTableInternal -> InlineTableKeyValuePair Space ',' Space InlineTableInternal | InlineTableKeyValuePair | Empty Value -> Atomic | InlineTable | Array KeyValuePair -> Space STRING Space '=' Space Value Space LineTerminator TableBody -> KeyValuePair TableBody | EmptyLine TableBody | EmptyLine | KeyValuePair EmptyLine -> Space LineTerminator FileEntry -> TableHeader | TableBody TOMLFileElements -> FileEntry TOMLFileElements | FileEntry | EmptyLine | EMPTY """ from ..elements.array import ArrayElement from ..elements.atomic import AtomicElement from ..elements.inlinetable import InlineTableElement from ..elements.metadata import ( NewlineElement, CommentElement, WhitespaceElement, PunctuationElement, ) from ..elements.table import TableElement from ..elements.tableheader import TableHeaderElement from ..tokens import TYPE_BARE_STRING from ..tokens import TYPE_BOOLEAN from ..tokens import TYPE_COMMENT from ..tokens import TYPE_DATE from ..tokens import TYPE_FLOAT from ..tokens import TYPE_INTEGER from ..tokens import TYPE_LITERAL_STRING from ..tokens import TYPE_MULTILINE_LITERAL_STRING from ..tokens import TYPE_MULTILINE_STRING from ..tokens import TYPE_NEWLINE from ..tokens import 
TYPE_OP_ASSIGNMENT from ..tokens import TYPE_OP_COMMA from ..tokens import TYPE_OP_CURLY_LEFT_BRACKET from ..tokens import TYPE_OP_CURLY_RIGHT_BRACKET from ..tokens import TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET from ..tokens import TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET from ..tokens import TYPE_OP_SQUARE_LEFT_BRACKET from ..tokens import TYPE_OP_SQUARE_RIGHT_BRACKET from ..tokens import TYPE_OPT_DOT from ..tokens import TYPE_STRING from ..tokens import TYPE_WHITESPACE from .recdesc import capture_from from .errors import ParsingError """ Non-terminals are represented as functions which return (RESULT, pending_token_stream), or raise ParsingError. """ def token(token_type): def factory(ts): t = ts.head if t.type != token_type: raise ParsingError("Expected a token of type {}".format(token_type)) return t, ts.tail return factory def newline_element(token_stream): """ Returns NewlineElement, pending_token_stream or raises ParsingError. """ captured = capture_from(token_stream).find(token(TYPE_NEWLINE)) return NewlineElement(captured.value()), captured.pending_tokens def comment_tokens(ts1): c1 = capture_from(ts1).find(token(TYPE_COMMENT)).and_find(token(TYPE_NEWLINE)) return c1.value(), c1.pending_tokens def comment_element(token_stream): """ Returns CommentElement, pending_token_stream or raises ParsingError. 
def zero_or_more_tokens(token_type):
    """
    Returns a finder that matches zero or more consecutive tokens of the
    given type, producing the (possibly empty) tuple of matched tokens.
    """

    def factory(token_stream):
        def more(ts):
            # One token of the requested type followed by zero or more others.
            c = (
                capture_from(ts)
                .find(token(token_type))
                .and_find(zero_or_more_tokens(token_type))
            )
            return c.value(), c.pending_tokens

        def one(ts):
            # Exactly one token of the requested type.  Must honor the
            # factory's token_type (not a hard-coded TYPE_WHITESPACE) and
            # return pending_tokens — Capturer has no `pending` attribute.
            c = capture_from(ts).find(token(token_type))
            return c.value(), c.pending_tokens

        def zero(ts):
            return tuple(), ts

        captured = capture_from(token_stream).find(more).or_find(one).or_find(zero)
        return captured.value(), captured.pending_tokens

    return factory
capture_from(token_stream).find(one).or_find(string_token) return captured.value(), captured.pending_tokens def table_header_element(token_stream): def single(ts1): c1 = ( capture_from(ts1) .find(zero_or_more_tokens(TYPE_WHITESPACE)) .and_find(token(TYPE_OP_SQUARE_LEFT_BRACKET)) .and_find(zero_or_more_tokens(TYPE_WHITESPACE)) .and_find(table_header_name_tokens) .and_find(zero_or_more_tokens(TYPE_WHITESPACE)) .and_find(token(TYPE_OP_SQUARE_RIGHT_BRACKET)) .and_find(zero_or_more_tokens(TYPE_WHITESPACE)) .and_find(line_terminator_tokens) ) return c1.value(), c1.pending_tokens def double(ts2): c2 = ( capture_from(ts2) .find(zero_or_more_tokens(TYPE_WHITESPACE)) .and_find(token(TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET)) .and_find(zero_or_more_tokens(TYPE_WHITESPACE)) .and_find(table_header_name_tokens) .and_find(zero_or_more_tokens(TYPE_WHITESPACE)) .and_find(token(TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET)) .and_find(zero_or_more_tokens(TYPE_WHITESPACE)) .and_find(line_terminator_tokens) ) return c2.value(), c2.pending_tokens captured = capture_from(token_stream).find(single).or_find(double) return TableHeaderElement(captured.value()), captured.pending_tokens def atomic_element(token_stream): captured = ( capture_from(token_stream) .find(string_token) .or_find(token(TYPE_INTEGER)) .or_find(token(TYPE_FLOAT)) .or_find(token(TYPE_DATE)) .or_find(token(TYPE_BOOLEAN)) ) return ( AtomicElement(captured.value("Expected an atomic primitive value")), captured.pending_tokens, ) def punctuation_element(token_type): def factory(ts): c = capture_from(ts).find(token(token_type)) return ( PunctuationElement( c.value("Expected the punctuation element: {}".format(token_type)) ), c.pending_tokens, ) return factory def value(token_stream): captured = ( capture_from(token_stream) .find(atomic_element) .or_find(array_element) .or_find(inline_table_element) ) return ( captured.value("Expected a primitive value, array or an inline table"), captured.pending_tokens, ) def array_internal(ts): def 
zero(ts0): c = ( capture_from(ts0) .and_find(line_terminator_element) .and_find(space_element) .and_find(array_internal) ) return c.value(), c.pending_tokens def one(ts1): c = ( capture_from(ts1) .find(value) .and_find(space_element) .and_find(punctuation_element(TYPE_OP_COMMA)) .and_find(space_element) .and_find(line_terminator_element) .and_find(space_element) .and_find(array_internal) ) return c.value(), c.pending_tokens def two(ts2): c = ( capture_from(ts2) .find(value) .and_find(space_element) .and_find(punctuation_element(TYPE_OP_COMMA)) .and_find(space_element) .and_find(array_internal) ) return c.value(), c.pending_tokens def three(ts3): c = capture_from(ts3).find(space_element).and_find(line_terminator_element) return c.value(), c.pending_tokens captured = ( capture_from(ts) .find(zero) .or_find(one) .or_find(two) .or_find(three) .or_find(value) .or_empty() ) return captured.value(), captured.pending_tokens def array_element(token_stream): def one(ts1): ca = ( capture_from(ts1) .find(punctuation_element(TYPE_OP_SQUARE_LEFT_BRACKET)) .and_find(space_element) .and_find(array_internal) .and_find(space_element) .and_find(punctuation_element(TYPE_OP_SQUARE_RIGHT_BRACKET)) ) return ca.value(), ca.pending_tokens def two(ts2): ca = ( capture_from(ts2) .find(punctuation_element(TYPE_OP_SQUARE_LEFT_BRACKET)) .and_find(space_element) .and_find(array_internal) .and_find(space_element) .and_find(line_terminator_element) .and_find(space_element) .and_find(punctuation_element(TYPE_OP_SQUARE_RIGHT_BRACKET)) ) return ca.value(), ca.pending_tokens captured = capture_from(token_stream).find(one).or_find(two) return ArrayElement(captured.value()), captured.pending_tokens def inline_table_element(token_stream): # InlineTableElement -> '{' Space InlineTableInternal Space '}' # InlineTableKeyValuePair = STRING Space '=' Space Value # InlineTableInternal -> InlineTableKeyValuePair Space ',' Space InlineTableInternal | # InlineTableKeyValuePair | Empty def key_value(ts): ca = ( 
capture_from(ts) .find(string_element) .and_find(space_element) .and_find(punctuation_element(TYPE_OP_ASSIGNMENT)) .and_find(space_element) .and_find(value) ) return ca.value(), ca.pending_tokens def internal(ts): def one(ts1): c1 = ( capture_from(ts1) .find(key_value) .and_find(space_element) .and_find(punctuation_element(TYPE_OP_COMMA)) .and_find(space_element) .and_find(internal) ) return c1.value(), c1.pending_tokens c = capture_from(ts).find(one).or_find(key_value).or_empty() return c.value(), c.pending_tokens captured = ( capture_from(token_stream) .find(punctuation_element(TYPE_OP_CURLY_LEFT_BRACKET)) .and_find(space_element) .and_find(internal) .and_find(space_element) .and_find(punctuation_element(TYPE_OP_CURLY_RIGHT_BRACKET)) ) return InlineTableElement(captured.value()), captured.pending_tokens def key_value_pair(token_stream): captured = ( capture_from(token_stream) .find(space_element) .and_find(string_element) .and_find(space_element) .and_find(punctuation_element(TYPE_OP_ASSIGNMENT)) .and_find(space_element) .and_find(value) .and_find(space_element) .and_find(line_terminator_element) ) return captured.value(), captured.pending_tokens def table_body_elements(token_stream): # TableBody -> KeyValuePair TableBody | EmptyLine TableBody | EmptyLine | KeyValuePair def one(ts1): c = capture_from(ts1).find(key_value_pair).and_find(table_body_elements) return c.value(), c.pending_tokens def two(ts2): c = capture_from(ts2).find(empty_line_elements).and_find(table_body_elements) return c.value(), c.pending_tokens captured = ( capture_from(token_stream) .find(one) .or_find(two) .or_find(empty_line_elements) .or_find(key_value_pair) ) return captured.value(), captured.pending_tokens def table_body_element(token_stream): captured = capture_from(token_stream).find(table_body_elements) return TableElement(captured.value()), captured.pending_tokens def empty_line_tokens(ts1): c1 = capture_from(ts1).find(space_element).and_find(line_terminator_element) return 
c1.value(), c1.pending_tokens def empty_line_elements(token_stream): captured = capture_from(token_stream).find(empty_line_tokens) return captured.value(), captured.pending_tokens def file_entry_element(token_stream): captured = ( capture_from(token_stream) .find(table_header_element) .or_find(table_body_element) ) return captured.value(), captured.pending_tokens def toml_file_elements(token_stream): def one(ts1): c1 = capture_from(ts1).find(file_entry_element).and_find(toml_file_elements) return c1.value(), c1.pending_tokens captured = ( capture_from(token_stream).find(one).or_find(file_entry_element).or_empty() ) return captured.value(), captured.pending_tokens PK!&poetry/toml/prettify/parser/recdesc.pyfrom ..elements.array import ArrayElement from .errors import ParsingError from .tokenstream import TokenStream class Capturer: """ Recursive-descent matching DSL. Yeah.. """ def __init__(self, token_stream, value=tuple(), dormant_error=None): self._token_stream = token_stream self._value = value self._dormant_error = dormant_error def find(self, finder): """ Searches the token stream using the given finder. `finder(ts)` is a function that accepts a `TokenStream` instance and returns `(element, pending_ts)` where `element` is the found "something" or a sequence of "somethings", and `pending_ts` the unconsumed `TokenStream`. `finder(ts)` can raise `ParsingError` to indicate that it couldn't find anything, or a `TokenStream.EndOfStream` to indicate a premature end of the TokenStream. This method returns a Capturer instance that can be further used to find more and more "somethings". The value at any given moment can be retrieved via the `Capturer.value()` method. """ try: # Execute finder! 
element, pending_ts = finder(self._token_stream) # If result is not a sequence, make it so if isinstance(element, ArrayElement) or not isinstance( element, (tuple, list) ): element = (element,) # Return a Capturer with accumulated findings return Capturer(pending_ts, value=self.value() + element) except ParsingError as e: # Failed to find, store error in returned value return Capturer(self._token_stream, dormant_error=e) except TokenStream.EndOfStream as e: # Premature end of stream, store error in returned value return Capturer(self._token_stream, dormant_error=e) def value(self, parsing_expectation_msg=None): """ Returns the accumulated values found as a sequence of values, or raises an encountered dormant error. If parsing_expectation_msg is specified and a dormant_error is a ParsingError, the expectation message is used instead in it. """ if self._dormant_error: if parsing_expectation_msg and isinstance( self._dormant_error, ParsingError ): raise ParsingError( parsing_expectation_msg, token=self._token_stream.head ) else: raise self._dormant_error return self._value @property def pending_tokens(self): """ Returns a TokenStream with the pending tokens yet to be processed. """ return self._token_stream def or_find(self, finder): """ If a dormant_error is present, try this new finder instead. If not, does nothing. """ if self._dormant_error: return Capturer(self._token_stream).find(finder) else: return self def or_end_of_file(self): """ Discards any errors if at end of the stream. """ if isinstance(self._dormant_error, TokenStream.EndOfStream): return Capturer(self.pending_tokens, value=self._value) else: return self def or_empty(self): """ Discards any previously-encountered dormant error. """ if self._dormant_error: return Capturer(self.pending_tokens, value=self._value) else: return self def and_find(self, finder): """ Accumulate new "somethings" to the stored value using the given finder. 
class TokenStream:
    """
    An immutable view over a token sequence.

    Consuming the head yields a new TokenStream (via `tail`) that shares the
    same underlying token tuple with an advanced offset.
    """

    class EndOfStream(Exception):
        pass

    Nothing = tuple()

    def __init__(self, _tokens, offset=0):
        # Normalize to a tuple so all derived views share one storage.
        self._tokens = _tokens if isinstance(_tokens, tuple) else tuple(_tokens)
        self._head_index = offset

    def __len__(self):
        # Number of tokens not yet consumed.
        return len(self._tokens) - self.offset

    @property
    def head(self):
        """The next unconsumed token; raises EndOfStream when exhausted."""
        if self._head_index >= len(self._tokens):
            raise TokenStream.EndOfStream
        return self._tokens[self._head_index]

    @property
    def tail(self):
        """A new TokenStream with the head consumed."""
        return TokenStream(self._tokens, offset=self._head_index + 1)

    @property
    def offset(self):
        """Index of the head within the underlying token tuple."""
        return self._head_index

    @property
    def at_end(self):
        """True when no tokens remain."""
        return self.offset >= len(self._tokens)
""" def __init__(self, name, priority, is_metadata): self._priority = priority self._name = name self._is_metadata = is_metadata @property def is_metadata(self): return self._is_metadata @property def priority(self): return self._priority def __repr__(self): return "{}-{}".format(self.priority, self._name) def __lt__(self, other): return isinstance(other, TokenType) and self._priority < other.priority # Possible types of tokens TYPE_BOOLEAN = TokenType("boolean", 0, is_metadata=False) TYPE_INTEGER = TokenType("integer", 0, is_metadata=False) TYPE_OP_COMMA = TokenType("comma", 0, is_metadata=True) TYPE_OP_SQUARE_LEFT_BRACKET = TokenType("square_left_bracket", 0, is_metadata=True) TYPE_OP_SQUARE_RIGHT_BRACKET = TokenType("square_right_bracket", 0, is_metadata=True) TYPE_OP_CURLY_LEFT_BRACKET = TokenType("curly_left_bracket", 0, is_metadata=True) TYPE_OP_CURLY_RIGHT_BRACKET = TokenType("curly_right_bracket", 0, is_metadata=True) TYPE_OP_ASSIGNMENT = TokenType("assignment", 0, is_metadata=True) TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET = TokenType( "double_square_left_bracket", 0, is_metadata=True ) TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET = TokenType( "double_square_right_bracket", 0, is_metadata=True ) TYPE_FLOAT = TokenType("float", 1, is_metadata=False) TYPE_DATE = TokenType("date", 40, is_metadata=False) TYPE_OPT_DOT = TokenType("dot", 40, is_metadata=True) TYPE_BARE_STRING = TokenType("bare_string", 50, is_metadata=False) TYPE_STRING = TokenType("string", 90, is_metadata=False) TYPE_MULTILINE_STRING = TokenType("multiline_string", 90, is_metadata=False) TYPE_LITERAL_STRING = TokenType("literal_string", 90, is_metadata=False) TYPE_MULTILINE_LITERAL_STRING = TokenType( "multiline_literal_string", 90, is_metadata=False ) TYPE_NEWLINE = TokenType("newline", 91, is_metadata=True) TYPE_WHITESPACE = TokenType("whitespace", 93, is_metadata=True) TYPE_COMMENT = TokenType("comment", 95, is_metadata=True) def is_operator(token): """ Returns True if the given token is an operator token. 
class Token:
    """
    A token/lexeme in a TOML source file.

    A Token is naturally ordered by its type, and compares equal to another
    Token with the same type and source substring (col/row are ignored by
    equality).
    """

    def __init__(self, _type, source_substring, col=None, row=None):
        self._source_substring = source_substring
        self._type = _type
        self._col = col
        self._row = row

    def __eq__(self, other):
        if not isinstance(other, Token):
            return False
        return (
            self.source_substring == other.source_substring and self.type == other.type
        )

    def __hash__(self):
        # Defining __eq__ without __hash__ makes instances unhashable on
        # Python 3; hash over the same fields equality uses so equal tokens
        # hash alike.
        return hash((self._type, self._source_substring))

    @property
    def col(self):
        """
        Column number (1-indexed), or None when position is unknown.
        """
        return self._col

    @property
    def row(self):
        """
        Row number (1-indexed), or None when position is unknown.
        """
        return self._row

    @property
    def type(self):
        """
        One of of the TOKEN_TYPE_* constants.
        """
        return self._type

    @property
    def source_substring(self):
        """
        The substring of the initial source file containing this token.
        """
        return self._source_substring

    def __lt__(self, other):
        return isinstance(other, Token) and self.type < other.type

    def __repr__(self):
        return "{}: {}".format(self.type, self.source_substring)
import tokens from ..errors import TOMLError from ..tokens import Token from ..util import chunkate_string class NotPrimitiveError(TOMLError): pass _operator_tokens_by_type = { tokens.TYPE_OP_SQUARE_LEFT_BRACKET: tokens.Token( tokens.TYPE_OP_SQUARE_LEFT_BRACKET, u"[" ), tokens.TYPE_OP_SQUARE_RIGHT_BRACKET: tokens.Token( tokens.TYPE_OP_SQUARE_RIGHT_BRACKET, u"]" ), tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET: tokens.Token( tokens.TYPE_OP_DOUBLE_SQUARE_LEFT_BRACKET, u"[[" ), tokens.TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET: tokens.Token( tokens.TYPE_OP_DOUBLE_SQUARE_RIGHT_BRACKET, u"]]" ), tokens.TYPE_OP_COMMA: tokens.Token(tokens.TYPE_OP_COMMA, u","), tokens.TYPE_NEWLINE: tokens.Token(tokens.TYPE_NEWLINE, u"\n"), tokens.TYPE_OPT_DOT: tokens.Token(tokens.TYPE_OPT_DOT, u"."), } def operator_token(token_type): return _operator_tokens_by_type[token_type] def create_primitive_token(value, multiline_strings_allowed=True): """ Creates and returns a single token for the given primitive atomic value. Raises NotPrimitiveError when the given value is not a primitive atomic value """ if value is None: return create_primitive_token("") elif isinstance(value, bool): return tokens.Token(tokens.TYPE_BOOLEAN, u"true" if value else u"false") elif isinstance(value, int): return tokens.Token(tokens.TYPE_INTEGER, u"{}".format(value)) elif isinstance(value, float): return tokens.Token(tokens.TYPE_FLOAT, u"{}".format(value)) elif isinstance(value, (datetime.datetime, datetime.date, datetime.time)): return tokens.Token(tokens.TYPE_DATE, value.isoformat()) elif isinstance(value, basestring): return create_string_token( value, multiline_strings_allowed=multiline_strings_allowed ) raise NotPrimitiveError("{} of type {}".format(value, type(value))) _bare_string_regex = re.compile("^[a-zA-Z0-9_-]*$") def create_string_token( text, bare_string_allowed=False, multiline_strings_allowed=True ): """ Creates and returns a single string token. Raises ValueError on non-string input. 
""" if not isinstance(text, basestring): raise ValueError("Given value must be a string") if text == "": return tokens.Token( tokens.TYPE_STRING, '""'.format(_escape_single_line_quoted_string(text)) ) elif bare_string_allowed and _bare_string_regex.match(text): return tokens.Token(tokens.TYPE_BARE_STRING, text) elif multiline_strings_allowed and ( len(tuple(c for c in text if c == "\n")) >= 2 or len(text) > 80 ): # If containing two or more newlines or is longer than 80 characters we'll use the multiline string format return _create_multiline_string_token(text) else: return tokens.Token( tokens.TYPE_STRING, u'"{}"'.format(_escape_single_line_quoted_string(text)) ) def _escape_single_line_quoted_string(text): return text.replace('"', '\\"') def _create_multiline_string_token(text): escaped = text.replace(u'"""', u'"""') if len(escaped) > 50: return tokens.Token( tokens.TYPE_MULTILINE_STRING, u'"""\n{}\\\n"""'.format(_break_long_text(escaped)), ) else: return tokens.Token(tokens.TYPE_MULTILINE_STRING, u'"""{}"""'.format(escaped)) def _break_long_text(text, maximum_length=75): """ Breaks into lines of 75 character maximum length that are terminated by a backslash. 
""" def next_line(remaining_text): # Returns a line and the remaining text if "\n" in remaining_text and remaining_text.index("\n") < maximum_length: i = remaining_text.index("\n") return remaining_text[: i + 1], remaining_text[i + 2 :] elif len(remaining_text) > maximum_length and " " in remaining_text: i = remaining_text[:maximum_length].rfind(" ") return remaining_text[: i + 1] + "\\\n", remaining_text[i + 2 :] else: return remaining_text, "" remaining_text = text lines = [] while remaining_text: line, remaining_text = next_line(remaining_text) lines += [line] return "".join(lines) def create_whitespace(source_substring): return Token(tokens.TYPE_WHITESPACE, source_substring) def create_multiline_string(text, maximum_line_length=120): def escape(t): return t.replace(u'"""', u'"""') source_substring = u'"""\n{}"""'.format( u"\\\n".join(chunkate_string(escape(text), maximum_line_length)) ) return Token(tokens.TYPE_MULTILINE_STRING, source_substring) PK!i1&poetry/toml/prettify/tokens/toml2py.pyimport codecs import functools import operator import re import string from . import ( TYPE_BOOLEAN, TYPE_INTEGER, TYPE_FLOAT, TYPE_DATE, TYPE_MULTILINE_STRING, TYPE_BARE_STRING, TYPE_MULTILINE_LITERAL_STRING, TYPE_LITERAL_STRING, TYPE_STRING, ) from .errors import MalformedDateError from .errors import BadEscapeCharacter def deserialize(token): """ Deserializes the value of a single tokens.Token instance based on its type. Raises DeserializationError when appropriate. """ if token.type == TYPE_BOOLEAN: return _to_boolean(token) elif token.type == TYPE_INTEGER: return _to_int(token) elif token.type == TYPE_FLOAT: return _to_float(token) elif token.type == TYPE_DATE: return _to_date(token) elif token.type in ( TYPE_STRING, TYPE_MULTILINE_STRING, TYPE_BARE_STRING, TYPE_LITERAL_STRING, TYPE_MULTILINE_LITERAL_STRING, ): return _to_string(token) else: raise Exception("This should never happen!") def _unescape_str(text): """ Unescapes a string according the TOML spec. 
Raises BadEscapeCharacter when appropriate. """ # Detect bad escape jobs bad_escape_regexp = re.compile(r'([^\\]|^)\\[^btnfr"\\uU]') if bad_escape_regexp.findall(text): raise BadEscapeCharacter # Do the unescaping return codecs.decode(_unicode_escaped_string(text), "unicode-escape") def _unicode_escaped_string(text): """ Escapes all unicode characters in the given string """ def is_unicode(c): return ( c.lower() not in string.ascii_letters + string.whitespace + string.punctuation + string.digits ) def escape_unicode_char(x): return codecs.encode(x, "unicode-escape") if any(is_unicode(c) for c in text): homogeneous_chars = tuple( escape_unicode_char(c) if is_unicode(c) else c.encode() for c in text ) homogeneous_bytes = functools.reduce(operator.add, homogeneous_chars) return homogeneous_bytes.decode() else: return text def _to_string(token): if token.type == TYPE_BARE_STRING: return token.source_substring elif token.type == TYPE_STRING: escaped = token.source_substring[1:-1] return _unescape_str(escaped) elif token.type == TYPE_MULTILINE_STRING: escaped = token.source_substring[3:-3] # Drop the first newline if existed if escaped and escaped[0] == "\n": escaped = escaped[1:] # Remove all occurrences of a slash-newline-zero-or-more-whitespace patterns escaped = re.sub(r"\\\n\s*", repl="", string=escaped, flags=re.DOTALL) return _unescape_str(escaped) elif token.type == TYPE_LITERAL_STRING: return token.source_substring[1:-1] elif token.type == TYPE_MULTILINE_LITERAL_STRING: text = token.source_substring[3:-3] if text[0] == "\n": text = text[1:] return text raise RuntimeError("Control should never reach here.") def _to_int(token): return int(token.source_substring.replace("_", "")) def _to_float(token): assert token.type == TYPE_FLOAT string = token.source_substring.replace("_", "") return float(string) def _to_boolean(token): return token.source_substring == "true" _correct_date_format = re.compile( 
r"(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(Z|([+-])(\d{2}):(\d{2}))" ) def _to_date(token): if not _correct_date_format.match(token.source_substring): raise MalformedDateError return token.source_substring PK!oSpoetry/toml/prettify/util.pyimport itertools def is_sequence_like(x): """ Returns True if x exposes a sequence-like interface. """ required_attrs = ("__len__", "__getitem__") return all(hasattr(x, attr) for attr in required_attrs) def is_dict_like(x): """ Returns True if x exposes a dict-like interface. """ required_attrs = ("__len__", "__getitem__", "keys", "values") return all(hasattr(x, attr) for attr in required_attrs) def join_with(iterable, separator): """ Joins elements from iterable with separator and returns the produced sequence as a list. separator must be addable to a list. """ inputs = list(iterable) b = [] for i, element in enumerate(inputs): if isinstance(element, (list, tuple, set)): b += tuple(element) else: b += [element] if i < len(inputs) - 1: b += separator return b def chunkate_string(text, length): """ Iterates over the given seq in chunks of at maximally the given length. Will never break a whole word. """ iterator_index = 0 def next_newline(): try: return next( i for (i, c) in enumerate(text) if i > iterator_index and c == "\n" ) except StopIteration: return len(text) def next_breaker(): try: return next( i for (i, c) in reversed(tuple(enumerate(text))) if i >= iterator_index and (i < iterator_index + length) and c in (" ", "\t") ) except StopIteration: return len(text) while iterator_index < len(text): next_chunk = text[iterator_index : min(next_newline(), next_breaker() + 1)] iterator_index += len(next_chunk) yield next_chunk def flatten_nested(nested_dicts): """ Flattens dicts and sequences into one dict with tuples of keys representing the nested keys. 
Example >>> dd = { \ 'dict1': {'name': 'Jon', 'id': 42}, \ 'dict2': {'name': 'Sam', 'id': 41}, \ 'seq1': [{'one': 1, 'two': 2}] \ } >>> flatten_nested(dd) == { \ ('dict1', 'name'): 'Jon', ('dict1', 'id'): 42, \ ('dict2', 'name'): 'Sam', ('dict2', 'id'): 41, \ ('seq1', 0, 'one'): 1, ('seq1', 0, 'two'): 2, \ } True """ assert isinstance( nested_dicts, (dict, list, tuple) ), "Only works with a collection parameter" def items(c): if isinstance(c, dict): return c.items() elif isinstance(c, (list, tuple)): return enumerate(c) else: raise RuntimeError("c must be a collection") def flatten(dd): output = {} for k, v in items(dd): if isinstance(v, (dict, list, tuple)): for child_key, child_value in flatten(v).items(): output[(k,) + child_key] = child_value else: output[(k,)] = v return output return flatten(nested_dicts) class PeekableIterator: # Returned by peek() when the iterator is exhausted. Truthiness is False. Nothing = tuple() def __init__(self, iter): self._iter = iter def __next__(self): return next(self._iter) def next(self): return self.__next__() def __iter__(self): return self def peek(self): """ Returns PeekableIterator.Nothing when the iterator is exhausted. """ try: v = next(self._iter) self._iter = itertools.chain((v,), self._iter) return v except StopIteration: return PeekableIterator.Nothing PK!)poetry/toml/raw.pyfrom .prettify.elements.abstracttable import AbstractTable def to_raw(x): from .cascadedict import CascadeDict if isinstance(x, AbstractTable): return x.primitive_value elif isinstance(x, CascadeDict): return x.neutralized elif isinstance(x, (list, tuple)): return [to_raw(y) for y in x] elif isinstance(x, dict): return {k: to_raw(v) for (k, v) in x.items()} else: return x PK!t=poetry/toml/structurer.pyfrom . import toplevels from .cascadedict import CascadeDict class NamedDict(dict): """ A dict that can use Name instances as keys. 
""" def __init__(self, other_dict=None): dict.__init__(self) if other_dict: for k, v in other_dict.items(): self[k] = v def __setitem__(self, key, value): """ key can be an Name instance. When key is a path in the form of an Name instance, all the parents and grandparents of the value are created along the way as instances of NamedDict. If the parent of the value exists, it is replaced with a CascadeDict() that cascades the old parent value with a new NamedDict that contains the given child name and value. """ if isinstance(key, toplevels.Name): obj = self for i, name in enumerate(key.sub_names): if name in obj: if i == len(key.sub_names) - 1: obj[name] = CascadeDict(obj[name], value) else: obj[name] = CascadeDict(NamedDict(), obj[name]) else: if i == len(key.sub_names) - 1: obj[name] = value else: obj[name] = NamedDict() obj = obj[name] else: return dict.__setitem__(self, key, value) def __contains__(self, item): try: _ = self[item] return True except KeyError: return False def append(self, key, value): """ Makes sure the value pointed to by key exists and is a list and appends the given value to it. """ if key in self: self[key].append(value) else: self[key] = [value] def __getitem__(self, item): if isinstance(item, toplevels.Name): d = self for name in item.sub_names: d = d[name] return d else: return dict.__getitem__(self, item) def __eq__(self, other): return dict.__eq__(self, other) def structure(table_toplevels): """ Accepts an ordered sequence of TopLevel instances and returns a navigable object structure representation of the TOML file. 
""" table_toplevels = tuple(table_toplevels) obj = NamedDict() last_array_of_tables = None # The Name of the last array-of-tables header for toplevel in table_toplevels: if isinstance(toplevel, toplevels.AnonymousTable): obj[""] = toplevel.table_element elif isinstance(toplevel, toplevels.Table): if last_array_of_tables and toplevel.name.is_prefixed_with( last_array_of_tables ): seq = obj[last_array_of_tables] unprefixed_name = toplevel.name.without_prefix(last_array_of_tables) seq[-1] = CascadeDict( seq[-1], NamedDict({unprefixed_name: toplevel.table_element}) ) else: obj[toplevel.name] = toplevel.table_element else: # It's an ArrayOfTables if ( last_array_of_tables and toplevel.name != last_array_of_tables and toplevel.name.is_prefixed_with(last_array_of_tables) ): seq = obj[last_array_of_tables] unprefixed_name = toplevel.name.without_prefix(last_array_of_tables) if unprefixed_name in seq[-1]: seq[-1][unprefixed_name].append(toplevel.table_element) else: cascaded_with = NamedDict( {unprefixed_name: [toplevel.table_element]} ) seq[-1] = CascadeDict(seq[-1], cascaded_with) else: obj.append(toplevel.name, toplevel.table_element) last_array_of_tables = toplevel.name return obj PK!p((((poetry/toml/toml_file.pyfrom .prettify.errors import NoArrayFoundError from . import structurer, toplevels, raw from .array import ArrayOfTables from .freshtable import FreshTable from .prettify.elements import factory as element_factory from .prettify import util class TOMLFile(dict): """ A TOMLFile object that tries its best to prserve formatting and order of mappings of the input source. Raises InvalidTOMLFileError on invalid input elements. Raises DuplicateKeysError, DuplicateTableError when appropriate. 
""" def __init__(self, _elements): self._elements = [] self._navigable = {} self.append_elements(_elements) def __getitem__(self, item): try: value = self._navigable[item] if isinstance(value, (list, tuple)): return ArrayOfTables(toml_file=self, name=item, iterable=value) else: return value except KeyError: return FreshTable(parent=self, name=item, is_array=False) def __contains__(self, item): return item in self.keys() def _setitem_with_key_seq(self, key_seq, value): """ Sets a the value in the TOML file located by the given key sequence. Example: self._setitem(('key1', 'key2', 'key3'), 'text_value') is equivalent to doing self['key1']['key2']['key3'] = 'text_value' """ table = self key_so_far = tuple() for key in key_seq[:-1]: key_so_far += (key,) self._make_sure_table_exists(key_so_far) table = table[key] table[key_seq[-1]] = value def _array_setitem_with_key_seq(self, array_name, index, key_seq, value): """ Sets a the array value in the TOML file located by the given key sequence. Example: self._array_setitem(array_name, index, ('key1', 'key2', 'key3'), 'text_value') is equivalent to doing self.array(array_name)[index]['key1']['key2']['key3'] = 'text_value' """ table = self.array(array_name)[index] key_so_far = tuple() for key in key_seq[:-1]: key_so_far += (key,) new_table = self._array_make_sure_table_exists( array_name, index, key_so_far ) if new_table is not None: table = new_table else: table = table[key] table[key_seq[-1]] = value def _make_sure_table_exists(self, name_seq): """ Makes sure the table with the full name comprising of name_seq exists. """ t = self for key in name_seq[:-1]: t = t[key] name = name_seq[-1] if name not in t: self.append_elements( [ element_factory.create_table_header_element(name_seq), element_factory.create_table({}), ] ) def _array_make_sure_table_exists(self, array_name, index, name_seq): """ Makes sure the table with the full name comprising of name_seq exists. 
""" t = self[array_name][index] for key in name_seq[:-1]: t = t[key] name = name_seq[-1] if name not in t: new_table = element_factory.create_table({}) self.append_elements( [ element_factory.create_table_header_element( (array_name,) + name_seq ), new_table, ] ) return new_table def __delitem__(self, key): table_element_index = self._elements.index(self._navigable[key]) self._elements[table_element_index] = element_factory.create_table({}) self._on_element_change() def __setitem__(self, key, value): # Setting an array-of-tables if ( key and isinstance(value, (tuple, list)) and value and all(isinstance(v, dict) for v in value) ): for table in value: self.array(key).append(table) # Or setting a whole single table elif isinstance(value, dict): if key and key in self: del self[key] for key_seq, child_value in util.flatten_nested({key: value}).items(): self._setitem_with_key_seq(key_seq, child_value) # if key in self._navigable: # del self[key] # index = self._elements.index(self._navigable[key]) # self._elements = self._elements[:index] + [element_factory.create_table(value)] + self._elements[index+1:] # else: # if key: # self._elements.append(element_factory.create_table_header_element(key)) # self._elements.append(element_factory.create_table(value)) # Or updating the anonymous section table else: # It's mea self[""][key] = value self._on_element_change() def _detect_toplevels(self): """ Returns a sequence of TopLevel instances for the current state of this table. """ return tuple( e for e in toplevels.identify(self.elements) if isinstance(e, toplevels.Table) ) def _update_table_fallbacks(self, table_toplevels): """ Updates the fallbacks on all the table elements to make relative table access possible. Raises DuplicateKeysError if appropriate. """ if len(self.elements) <= 1: return def parent_of(toplevel): # Returns an TopLevel parent of the given entry, or None. 
for parent_toplevel in table_toplevels: if toplevel.name.sub_names[:-1] == parent_toplevel.name.sub_names: return parent_toplevel for entry in table_toplevels: if entry.name.is_qualified: parent = parent_of(entry) if parent: child_name = entry.name.without_prefix(parent.name) parent.table_element.set_fallback( {child_name.sub_names[0]: entry.table_element} ) def _recreate_navigable(self): if self._elements: self._navigable = structurer.structure(toplevels.identify(self._elements)) def array(self, name): """ Returns the array of tables with the given name. """ if name in self._navigable: if isinstance(self._navigable[name], (list, tuple)): return self[name] else: raise NoArrayFoundError else: return ArrayOfTables(toml_file=self, name=name) def _on_element_change(self): self._recreate_navigable() table_toplevels = self._detect_toplevels() self._update_table_fallbacks(table_toplevels) def append_elements(self, elements): """ Appends more elements to the contained internal elements. """ self._elements = self._elements + list(elements) self._on_element_change() def prepend_elements(self, elements): """ Prepends more elements to the contained internal elements. """ self._elements = list(elements) + self._elements self._on_element_change() def dumps(self): """ Returns the TOML file serialized back to str. """ return "".join(element.serialized() for element in self._elements) def dump(self, file_path): with open(file_path, mode="w") as fp: fp.write(self.dumps()) def keys(self): return set(self._navigable.keys()) | {""} def values(self): return self._navigable.values() def items(self): items = list(self._navigable.items()) def has_anonymous_entry(): return any(key == "" for (key, _) in items) if has_anonymous_entry(): return items else: return items + [("", self[""])] def get(self, item, default=None): return self._navigable.get(item, default) @property def primitive(self): """ Returns a primitive object representation for this container (which is a dict). 
WARNING: The returned container does not contain any markup or formatting metadata. """ raw_container = raw.to_raw(self._navigable) # Collapsing the anonymous table onto the top-level container is present if "" in raw_container: raw_container.update(raw_container[""]) del raw_container[""] return raw_container def append_fresh_table(self, fresh_table): """ Gets called by FreshTable instances when they get written to. """ if fresh_table.name: elements = [] if fresh_table.is_array: elements += [ element_factory.create_array_of_tables_header_element( fresh_table.name ) ] else: elements += [ element_factory.create_table_header_element(fresh_table.name) ] elements += [fresh_table, element_factory.create_newline_element()] self.append_elements(elements) else: # It's an anonymous table self.prepend_elements( [fresh_table, element_factory.create_newline_element()] ) @property def elements(self): return self._elements def __str__(self): is_empty = (not self[""]) and (not tuple(k for k in self.keys() if k)) def key_name(key): return "[ANONYMOUS]" if not key else key def pair(key, value): return "%s = %s" % (key_name(key), str(value)) content_text = ( "" if is_empty else "\n\t" + ",\n\t".join(pair(k, v) for (k, v) in self.items() if v) + "\n" ) return "TOMLFile{%s}" % content_text def __repr__(self): return str(self) PK!;?pppoetry/toml/toplevels.py""" Top-level entries in a TOML file. """ from .prettify import elements from .prettify.elements import TableElement, TableHeaderElement from .peekableit import PeekableIterator class TopLevel: """ A abstract top-level entry. """ def __init__(self, names, table_element): self._table_element = table_element self._names = Name(names) @property def table_element(self): return self._table_element @property def name(self): """ The distinct name of a table entry as an Name instance. 
""" return self._names class Name: def __init__(self, names): self._names = names @property def sub_names(self): return self._names def drop(self, n=0): """ Returns the name after dropping the first n entries of it. """ return Name(names=self._names[n:]) def is_prefixed_with(self, names): if isinstance(names, Name): return self.is_prefixed_with(names.sub_names) for i, name in enumerate(names): if self._names[i] != name: return False return True def without_prefix(self, names): if isinstance(names, Name): return self.without_prefix(names.sub_names) for i, name in enumerate(names): if name != self._names[i]: return Name(self._names[i:]) return Name(names=self.sub_names[len(names) :]) @property def is_qualified(self): return len(self._names) > 1 def __str__(self): return ".".join(self.sub_names) def __hash__(self): return hash(str(self)) def __eq__(self, other): return str(self) == str(other) def __ne__(self, other): return not self.__eq__(other) class AnonymousTable(TopLevel): def __init__(self, table_element): TopLevel.__init__(self, ("",), table_element) class Table(TopLevel): def __init__(self, names, table_element): TopLevel.__init__(self, names=names, table_element=table_element) class ArrayOfTables(TopLevel): def __init__(self, names, table_element): TopLevel.__init__(self, names=names, table_element=table_element) def _validate_file_elements(file_elements): pass def identify(file_elements): """ Outputs an ordered sequence of instances of TopLevel types. Elements start with an optional TableElement, followed by zero or more pairs of (TableHeaderElement, TableElement). 
""" if not file_elements: return _validate_file_elements(file_elements) # An iterator over enumerate(the non-metadata) elements iterator = PeekableIterator( (element_i, element) for (element_i, element) in enumerate(file_elements) if element.type != elements.TYPE_METADATA ) try: _, first_element = iterator.peek() if isinstance(first_element, TableElement): iterator.next() yield AnonymousTable(first_element) except KeyError: pass except StopIteration: return for element_i, element in iterator: if not isinstance(element, TableHeaderElement): continue # If TableHeader of a regular table, return Table following it if not element.is_array_of_tables: table_element_i, table_element = next(iterator) yield Table(names=element.names, table_element=table_element) # If TableHeader of an array of tables, do your thing else: table_element_i, table_element = next(iterator) yield ArrayOfTables(names=element.names, table_element=table_element) PK!poetry/utils/__init__.pyPK!iBpoetry/utils/_compat.pyimport sys try: import pathlib2 from pathlib2 import Path except ImportError: from pathlib import Path try: # Python 2 long = long unicode = unicode basestring = basestring except NameError: # Python 3 long = int unicode = str basestring = str PY2 = sys.version_info[0] == 2 PY36 = sys.version_info >= (3, 6) def decode(string, encodings=None): if not PY2 and not isinstance(string, bytes): return string if PY2 and isinstance(string, unicode): return string encodings = encodings or ["utf-8", "latin1", "ascii"] for encoding in encodings: try: return string.decode(encoding) except (UnicodeEncodeError, UnicodeDecodeError): pass return string.decode(encodings[0], errors="ignore") def encode(string, encodings=None): if not PY2 and isinstance(string, bytes): return string if PY2 and isinstance(string, str): return string encodings = encodings or ["utf-8", "latin1", "ascii"] for encoding in encodings: try: return string.encode(encoding) except (UnicodeEncodeError, UnicodeDecodeError): pass return 
string.encode(encodings[0], errors="ignore") def to_str(string): if isinstance(string, str) or not isinstance(string, (unicode, bytes)): return string if PY2: method = "encode" else: method = "decode" encodings = ["utf-8", "latin1", "ascii"] for encoding in encodings: try: return getattr(string, method)(encoding) except (UnicodeEncodeError, UnicodeDecodeError): pass return getattr(string, method)(encodings[0], errors="ignore") PK! Unix: ~/.cache/ (XDG default) Windows: C:\Users\\AppData\Local\\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir`). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. """ if WINDOWS: # Get the base path path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) # Add our app name and Cache directory to it path = os.path.join(path, appname, "Cache") elif sys.platform == "darwin": # Get the base path path = expanduser("~/Library/Caches") # Add our app name to it path = os.path.join(path, appname) else: # Get the base path path = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache")) # Add our app name to it path = os.path.join(path, appname) return path def user_data_dir(appname, roaming=False): """ Return full path to the user-specific data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. 
Typical user data directories are: macOS: ~/Library/Application Support/ Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined Win XP (not roaming): C:\Documents and Settings\\ ... ...Application Data\ Win XP (roaming): C:\Documents and Settings\\Local ... ...Settings\Application Data\ Win 7 (not roaming): C:\\Users\\AppData\Local\ Win 7 (roaming): C:\\Users\\AppData\Roaming\ For Unix, we follow the XDG spec and support $XDG_DATA_HOME. That means, by default "~/.local/share/". """ if WINDOWS: const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" path = os.path.join(os.path.normpath(_get_win_folder(const)), appname) elif sys.platform == "darwin": path = os.path.join(expanduser("~/Library/Application Support/"), appname) else: path = os.path.join( os.getenv("XDG_DATA_HOME", expanduser("~/.local/share")), appname ) return path def user_config_dir(appname, roaming=True): """Return full path to the user-specific config dir for this application. "appname" is the name of application. If None, just the system directory is returned. "roaming" (boolean, default True) can be set False to not use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. Typical user data directories are: macOS: same as user_data_dir Unix: ~/.config/ Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means, by default "~/.config/". """ if WINDOWS: path = user_data_dir(appname, roaming=roaming) elif sys.platform == "darwin": path = user_data_dir(appname) else: path = os.getenv("XDG_CONFIG_HOME", expanduser("~/.config")) path = os.path.join(path, appname) return path # for the discussion regarding site_config_dirs locations # see def site_config_dirs(appname): """Return a list of potential user-shared config dirs for this application. "appname" is the name of application. 
Typical user config directories are: macOS: /Library/Application Support// Unix: /etc or $XDG_CONFIG_DIRS[i]// for each value in $XDG_CONFIG_DIRS Win XP: C:\Documents and Settings\All Users\Application ... ...Data\\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: Hidden, but writeable on Win 7: C:\ProgramData\\ """ if WINDOWS: path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) pathlist = [os.path.join(path, appname)] elif sys.platform == "darwin": pathlist = [os.path.join("/Library/Application Support", appname)] else: # try looking in $XDG_CONFIG_DIRS xdg_config_dirs = os.getenv("XDG_CONFIG_DIRS", "/etc/xdg") if xdg_config_dirs: pathlist = [ os.path.join(expanduser(x), appname) for x in xdg_config_dirs.split(os.pathsep) ] else: pathlist = [] # always look in /etc directly as well pathlist.append("/etc") return pathlist # -- Windows support functions -- def _get_win_folder_from_registry(csidl_name): """ This is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names. """ import _winreg shell_folder_name = { "CSIDL_APPDATA": "AppData", "CSIDL_COMMON_APPDATA": "Common AppData", "CSIDL_LOCAL_APPDATA": "Local AppData", }[csidl_name] key = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders", ) directory, _type = _winreg.QueryValueEx(key, shell_folder_name) return directory def _get_win_folder_with_ctypes(csidl_name): csidl_const = { "CSIDL_APPDATA": 26, "CSIDL_COMMON_APPDATA": 35, "CSIDL_LOCAL_APPDATA": 28, }[csidl_name] buf = ctypes.create_unicode_buffer(1024) ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) # Downgrade to short path name if have highbit chars. See # . 
has_high_char = False for c in buf: if ord(c) > 255: has_high_char = True break if has_high_char: buf2 = ctypes.create_unicode_buffer(1024) if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): buf = buf2 return buf.value if WINDOWS: try: import ctypes _get_win_folder = _get_win_folder_with_ctypes except ImportError: _get_win_folder = _get_win_folder_from_registry def _win_path_to_bytes(path): """Encode Windows paths to bytes. Only used on Python 2. Motivation is to be consistent with other operating systems where paths are also returned as bytes. This avoids problems mixing bytes and Unicode elsewhere in the codebase. For more details and discussion see . If encoding using ASCII and MBCS fails, return the original Unicode path. """ for encoding in ("ASCII", "MBCS"): try: return path.encode(encoding) except (UnicodeEncodeError, LookupError): pass return path PK!<ddpoetry/utils/helpers.pyimport re import shutil import tempfile from contextlib import contextmanager from typing import Union from poetry.version import Version _canonicalize_regex = re.compile("[-_]+") def canonicalize_name(name): # type: (str) -> str return _canonicalize_regex.sub("-", name).lower() def module_name(name): # type: (str) -> str return canonicalize_name(name).replace(".", "_").replace("-", "_") def normalize_version(version): # type: (str) -> str return str(Version(version)) @contextmanager def temporary_directory(*args, **kwargs): try: from tempfile import TemporaryDirectory with TemporaryDirectory(*args, **kwargs) as name: yield name except ImportError: name = tempfile.mkdtemp(*args, **kwargs) yield name shutil.rmtree(name) def parse_requires(requires): # type: (str) -> Union[list, None] lines = requires.split("\n") requires_dist = [] in_section = False current_marker = None for line in lines: line = line.strip() if not line: if in_section: in_section = False continue if line.startswith("["): # extras or conditional dependencies marker = line.lstrip("[").rstrip("]") if ":" 
not in marker: extra, marker = marker, None else: extra, marker = marker.split(":") if extra: if marker: marker = '{} and extra == "{}"'.format(marker, extra) else: marker = 'extra == "{}"'.format(extra) if marker: current_marker = marker continue if current_marker: line = "{}; {}".format(line, current_marker) requires_dist.append(line) if requires_dist: return requires_dist PK!s((poetry/utils/toml_file.py# -*- coding: utf-8 -*- import toml from poetry.toml import dumps from poetry.toml import loads from poetry.toml import TOMLFile from ._compat import Path class TomlFile: def __init__(self, path): self._path = Path(path) @property def path(self): return self._path def read(self, raw=False): # type: (bool) -> dict with self._path.open() as f: if raw: return toml.loads(f.read()) return loads(f.read()) def write(self, data): # type: (...) -> None if not isinstance(data, TOMLFile): data = toml.dumps(data) else: data = dumps(data) with self._path.open("w") as f: f.write(data) def __getattr__(self, item): return getattr(self._path, item) PK!tw'w'poetry/utils/venv.pyimport os import platform import subprocess import sys import sysconfig import warnings from contextlib import contextmanager from subprocess import CalledProcessError from poetry.config import Config from poetry.locations import CACHE_DIR from poetry.utils._compat import Path from poetry.utils._compat import decode class VenvError(Exception): pass class VenvCommandError(VenvError): def __init__(self, e): # type: (CalledProcessError) -> None message = "Command {} errored with the following output: \n{}".format( e.cmd, decode(e.output) ) super(VenvCommandError, self).__init__(message) class Venv(object): def __init__(self, venv=None): self._venv = venv if self._venv: self._venv = Path(self._venv) self._windows = sys.platform == "win32" self._bin_dir = None if venv: bin_dir = "bin" if not self._windows else "Scripts" self._bin_dir = self._venv / bin_dir self._version_info = None self._python_implementation = 
None @classmethod def create(cls, io, name=None, cwd=None): # type: (...) -> Venv if "VIRTUAL_ENV" not in os.environ: # Not in a virtualenv # Checking if we need to create one # First we check if there is a .venv # at the root of the project. if cwd and (cwd / ".venv").exists(): venv = cwd / ".venv" else: config = Config.create("config.toml") create_venv = config.setting("settings.virtualenvs.create") root_venv = config.setting("settings.virtualenvs.in-project") venv_path = config.setting("settings.virtualenvs.path") if root_venv: if not cwd: raise RuntimeError( "Unbale to determine the project's directory" ) venv_path = cwd / ".venv" elif venv_path is None: venv_path = Path(CACHE_DIR) / "virtualenvs" else: venv_path = Path(venv_path) if not name: name = Path.cwd().name name = "{}-py{}".format( name, ".".join([str(v) for v in sys.version_info[:2]]) ) if root_venv: venv = venv_path else: venv = venv_path / name if not venv.exists(): if create_venv is False: io.writeln( "" "Skipping virtualenv creation, " "as specified in config file." "" ) return cls() io.writeln( "Creating virtualenv {} in {}".format( name, str(venv_path) ) ) cls.build(str(venv)) else: if io.is_very_verbose(): io.writeln( "Virtualenv {} already exists.".format(name) ) os.environ["VIRTUAL_ENV"] = str(venv) # venv detection: # stdlib venv may symlink sys.executable, so we can't use realpath. # but others can symlink *to* the venv Python, # so we can't just use sys.executable. 
# So we just check every item in the symlink tree (generally <= 3) p = os.path.normcase(sys.executable) paths = [p] while os.path.islink(p): p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p))) paths.append(p) p_venv = os.path.normcase(os.environ["VIRTUAL_ENV"]) if any(p.startswith(p_venv) for p in paths): # Running properly in the virtualenv, don't need to do anything return cls() venv = os.environ["VIRTUAL_ENV"] return cls(venv) @classmethod def build(cls, path): try: from venv import EnvBuilder builder = EnvBuilder(with_pip=True) build = builder.create except ImportError: # We fallback on virtualenv for Python 2.7 from virtualenv import create_environment build = create_environment build(path) @property def venv(self): return self._venv @property def python(self): # type: () -> str """ Path to current python executable """ return self._bin("python") @property def pip(self): # type: () -> str """ Path to current pip executable """ return self._bin("pip") @property def version_info(self): # type: () -> tuple if self._version_info is not None: return self._version_info if not self.is_venv(): self._version_info = sys.version_info else: output = self.run( "python", "-c", "\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\"", shell=True, ) self._version_info = tuple([int(s) for s in output.strip().split(".")]) return self._version_info @property def python_implementation(self): if self._python_implementation is not None: return self._python_implementation if not self.is_venv(): impl = platform.python_implementation() else: impl = self.run( "python", "-c", '"import platform; print(platform.python_implementation())"', shell=True, ).strip() self._python_implementation = impl return self._python_implementation def config_var(self, var): if not self.is_venv(): try: return sysconfig.get_config_var(var) except IOError as e: warnings.warn("{0}".format(e), RuntimeWarning) return None try: value = self.run( "python", "-c", '"import 
sysconfig; ' "print(sysconfig.get_config_var('{}'))\"".format(var), shell=True, ).strip() except VenvCommandError as e: warnings.warn("{0}".format(e), RuntimeWarning) return None if value == "None": value = None elif value == "1": value = 1 elif value == "0": value = 0 return value def run(self, bin, *args, **kwargs): """ Run a command inside the virtual env. """ if self._windows: bin = self._bin(bin) cmd = [bin] + list(args) shell = kwargs.get("shell", False) call = kwargs.pop("call", False) if shell: cmd = " ".join(cmd) try: if not self.is_venv(): if call: return subprocess.call(cmd, stderr=subprocess.STDOUT, **kwargs) output = subprocess.check_output( cmd, stderr=subprocess.STDOUT, **kwargs ) else: if self._windows: kwargs["shell"] = True with self.temp_environ(): os.environ["PATH"] = self._path() os.environ["VIRTUAL_ENV"] = str(self._venv) self.unset_env("PYTHONHOME") self.unset_env("__PYVENV_LAUNCHER__") if call: return subprocess.call(cmd, stderr=subprocess.STDOUT, **kwargs) output = subprocess.check_output( cmd, stderr=subprocess.STDOUT, **kwargs ) except CalledProcessError as e: raise VenvCommandError(e) return decode(output) def execute(self, bin, *args, **kwargs): if not self.is_venv(): return subprocess.call([bin] + list(args)) else: if self._windows: bin = self._bin(bin) with self.temp_environ(): os.environ["PATH"] = self._path() os.environ["VIRTUAL_ENV"] = str(self._venv) self.unset_env("PYTHONHOME") self.unset_env("__PYVENV_LAUNCHER__") return subprocess.call([bin] + list(args), **kwargs) @contextmanager def temp_environ(self): environ = dict(os.environ) try: yield finally: os.environ.clear() os.environ.update(environ) def _path(self): return os.pathsep.join([str(self._bin_dir), os.environ["PATH"]]) def unset_env(self, key): if key in os.environ: del os.environ[key] def get_shell(self): shell = Path(os.environ.get("SHELL", "")).stem if shell in ("bash", "zsh", "fish"): return shell def _bin(self, bin): # type: (str) -> str """ Return path to the given 
executable. """ if not self.is_venv(): return bin bin_path = (self._bin_dir / bin).with_suffix(".exe" if self._windows else "") if not bin_path.exists(): return bin return str(bin_path) def is_venv(self): # type: () -> bool return self._venv is not None class NullVenv(Venv): def __init__(self, execute=False): super(NullVenv, self).__init__() self.executed = [] self._execute = execute def run(self, bin, *args): self.executed.append([bin] + list(args)) if self._execute: return super(NullVenv, self).run(bin, *args) def _bin(self, bin): return bin PK!N8poetry/vcs/__init__.pyfrom poetry.utils._compat import Path from .git import Git def get_vcs(directory): # type: (Path) -> Git directory = directory.resolve() for p in [directory] + list(directory.parents): if (p / ".git").is_dir(): return Git(p) PK!-L' ' poetry/vcs/git.py# -*- coding: utf-8 -*- import re import subprocess from poetry.utils._compat import decode class GitConfig: def __init__(self): self._config = {} try: config_list = decode( subprocess.check_output( ["git", "config", "-l"], stderr=subprocess.STDOUT ) ) m = re.findall("(?ms)^([^=]+)=(.*?)$", config_list) if m: for group in m: self._config[group[0]] = group[1] except subprocess.CalledProcessError: pass def get(self, key, default=None): return self._config.get(key, default) def __getitem__(self, item): return self._config[item] class Git: def __init__(self, work_dir=None): self._config = GitConfig() self._work_dir = work_dir @property def config(self): # type: () -> GitConfig return self._config def clone(self, repository, dest): # type: (...) -> str return self.run("clone", repository, str(dest)) def checkout(self, rev, folder=None): # type: (...) -> str args = [] if folder is None and self._work_dir: folder = self._work_dir if folder: args += [ "--git-dir", (folder / ".git").as_posix(), "--work-tree", folder.as_posix(), ] args += ["checkout", rev] return self.run(*args) def rev_parse(self, rev, folder=None): # type: (...) 
-> str args = [] if folder is None and self._work_dir: folder = self._work_dir if folder: args += [ "--git-dir", (folder / ".git").as_posix(), "--work-tree", folder.as_posix(), ] args += ["rev-parse", rev] return self.run(*args) def get_ignored_files(self, folder=None): # type: (...) -> list args = [] if folder is None and self._work_dir: folder = self._work_dir if folder: args += [ "--git-dir", (folder / ".git").as_posix(), "--work-tree", folder.as_posix(), ] args += ["ls-files", "--others", "-i", "--exclude-standard"] output = self.run(*args) return output.split("\n") def run(self, *args): # type: (...) -> str return decode( subprocess.check_output(["git"] + list(args), stderr=subprocess.STDOUT) ) PK!Nt)upoetry/version/__init__.pyimport operator from typing import Union from .exceptions import InvalidVersion from .legacy_version import LegacyVersion from .version import Version OP_EQ = operator.eq OP_LT = operator.lt OP_LE = operator.le OP_GT = operator.gt OP_GE = operator.ge OP_NE = operator.ne _trans_op = { "=": OP_EQ, "==": OP_EQ, "<": OP_LT, "<=": OP_LE, ">": OP_GT, ">=": OP_GE, "!=": OP_NE, } def parse( version, strict=False # type: str # type: bool ): # type:(...) -> Union[Version, LegacyVersion] """ Parse the given version string and return either a :class:`Version` object or a LegacyVersion object depending on if the given version is a valid PEP 440 version or a legacy version. If strict=True only PEP 440 versions will be accepted. 
""" try: return Version(version) except InvalidVersion: if strict: raise return LegacyVersion(version) PK!bVpoetry/version/base.pyclass BaseVersion: def __hash__(self): return hash(self._key) def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def _compare(self, other, method): if not isinstance(other, BaseVersion): return NotImplemented return method(self._key, other._key) PK!XE%,,poetry/version/exceptions.pyclass InvalidVersion(ValueError): pass PK!9poetry/version/helpers.pyfrom poetry.semver import parse_constraint from poetry.semver import VersionUnion PYTHON_VERSION = [ "2.7.*", "3.0.*", "3.1.*", "3.2.*", "3.3.*", "3.4.*", "3.5.*", "3.6.*", "3.7.*", "3.8.*", ] def format_python_constraint(constraint): """ This helper will help in transforming disjunctive constraint into proper constraint. 
""" if not isinstance(constraint, VersionUnion): return str(constraint) formatted = [] accepted = [] for version in PYTHON_VERSION: version_constraint = parse_constraint(version) matches = constraint.allows_any(version_constraint) if not matches: formatted.append("!=" + version) else: accepted.append(version) # Checking lower bound low = accepted[0] formatted.insert(0, ">=" + ".".join(low.split(".")[:2])) return ", ".join(formatted) PK!Ѷ poetry/version/legacy_version.pyimport re from .base import BaseVersion class LegacyVersion(BaseVersion): def __init__(self, version): self._version = str(version) self._key = _legacy_cmpkey(self._version) def __str__(self): return self._version def __repr__(self): return "".format(repr(str(self))) @property def public(self): return self._version @property def base_version(self): return self._version @property def local(self): return None @property def is_prerelease(self): return False @property def is_postrelease(self): return False _legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) _legacy_version_replacement_map = { "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", } def _parse_version_parts(s): for part in _legacy_version_component_re.split(s): part = _legacy_version_replacement_map.get(part, part) if not part or part == ".": continue if part[:1] in "0123456789": # pad for numeric comparison yield part.zfill(8) else: yield "*" + part # ensure that alpha/beta/candidate are before final yield "*final" def _legacy_cmpkey(version): # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch # greater than or equal to 0. This will effectively put the LegacyVersion, # which uses the defacto standard originally implemented by setuptools, # as before all PEP 440 versions. epoch = -1 # This scheme is taken from pkg_resources.parse_version setuptools prior to # it's adoption of the packaging library. 
parts = [] for part in _parse_version_parts(version.lower()): if part.startswith("*"): # remove "-" before a prerelease tag if part < "*final": while parts and parts[-1] == "*final-": parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == "00000000": parts.pop() parts.append(part) parts = tuple(parts) return epoch, parts PK!}?poetry/version/markers.pyimport operator from pyparsing import ParseException, ParseResults, stringStart, stringEnd from pyparsing import ZeroOrMore, Group, Forward, QuotedString from pyparsing import Literal as L # noqa class InvalidMarker(ValueError): """ An invalid marker was found, users should refer to PEP 508. """ class UndefinedComparison(ValueError): """ An invalid operation was attempted on a value that doesn't support it. """ class UndefinedEnvironmentName(ValueError): """ A name was attempted to be used that does not exist inside of the environment. """ class Node(object): def __init__(self, value): self.value = value def __str__(self): return str(self.value) def __repr__(self): return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) def serialize(self): raise NotImplementedError class Variable(Node): def serialize(self): return str(self) class Value(Node): def serialize(self): return '"{0}"'.format(self) class Op(Node): def serialize(self): return str(self) VARIABLE = ( L("implementation_version") | L("platform_python_implementation") | L("implementation_name") | L("python_full_version") | L("platform_release") | L("platform_version") | L("platform_machine") | L("platform_system") | L("python_version") | L("sys_platform") | L("os_name") | L("os.name") | L("sys.platform") # PEP-345 | L("platform.version") # PEP-345 | L("platform.machine") # PEP-345 | L("platform.python_implementation") # PEP-345 | L("python_implementation") # PEP-345 | L("extra") # undocumented setuptools legacy ) ALIASES = { "os.name": "os_name", "sys.platform": "sys_platform", "platform.version": 
"platform_version", "platform.machine": "platform_machine", "platform.python_implementation": "platform_python_implementation", "python_implementation": "platform_python_implementation", } VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) VERSION_CMP = ( L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") ) MARKER_OP = VERSION_CMP | L("not in") | L("in") MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) MARKER_VALUE = QuotedString("'") | QuotedString('"') MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) BOOLOP = L("and") | L("or") MARKER_VAR = VARIABLE | MARKER_VALUE MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) LPAREN = L("(").suppress() RPAREN = L(")").suppress() MARKER_EXPR = Forward() MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) MARKER = stringStart + MARKER_EXPR + stringEnd def _coerce_parse_result(results): if isinstance(results, ParseResults): return [_coerce_parse_result(i) for i in results] else: return results def _format_marker(marker, first=True): assert isinstance(marker, (list, tuple, str)) # Sometimes we have a structure like [[...]] which is a single item list # where the single item is itself it's own list. In that case we want skip # the rest of this function so that we don't get extraneous () on the # outside. 
if ( isinstance(marker, list) and len(marker) == 1 and isinstance(marker[0], (list, tuple)) ): return _format_marker(marker[0]) if isinstance(marker, list): inner = (_format_marker(m, first=False) for m in marker) if first: return " ".join(inner) else: return "(" + " ".join(inner) + ")" elif isinstance(marker, tuple): return " ".join([m.serialize() for m in marker]) else: return marker _operators = { "in": lambda lhs, rhs: lhs in rhs, "not in": lambda lhs, rhs: lhs not in rhs, "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def format_full_version(info): version = "{0.major}.{0.minor}.{0.micro}".format(info) kind = info.releaselevel if kind != "final": version += kind[0] + str(info.serial) return version class Marker(object): def __init__(self, marker): try: self._markers = _coerce_parse_result(MARKER.parseString(marker)) except ParseException as e: err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( marker, marker[e.loc : e.loc + 8] ) raise InvalidMarker(err_str) @property def markers(self): return self._markers def __str__(self): return _format_marker(self._markers) def __repr__(self): return "".format(str(self)) PK!""poetry/version/requirements.py# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
from __future__ import absolute_import, division, print_function import string import re try: import urllib.parse as urlparse except ImportError: from urlparse import urlparse from pyparsing import stringStart, stringEnd, originalTextFor, ParseException from pyparsing import ZeroOrMore, Word, Optional, Regex, Combine from pyparsing import Literal as L # noqa from poetry.semver import parse_constraint from .markers import MARKER_EXPR, Marker LEGACY_REGEX = ( r""" (?P(==|!=|<=|>=|<|>)) \s* (?P [^,;\s)]* # Since this is a "legacy" specifier, and the version # string can be just about anything, we match everything # except for whitespace, a semi-colon for marker support, # a closing paren since versions can be enclosed in # them, and a comma since it's a version separator. ) """ ) REGEX = ( r""" (?P(~=|==|!=|<=|>=|<|>|===)) (?P (?: # The identity operators allow for an escape hatch that will # do an exact string match of the version you wish to install. # This will not be parsed by PEP 440 and we cannot determine # any semantic meaning from it. This operator is discouraged # but included entirely as an escape hatch. (?<====) # Only match for the identity operator \s* [^\s]* # We just match everything, except for whitespace # since we are only testing for strict identity. ) | (?: # The (non)equality operators allow for wild card and local # versions to be specified so we have to define these two # operators separately to enable that. (?<===|!=) # Only match for equals and not equals \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? # You cannot use a wild card and a dev or local version # together so group them with a | and make them optional. (?: (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local | \.\* # Wild card syntax of .* )? 
) | (?: # The compatible operator requires at least two digits in the # release segment. (?<=~=) # Only match for the compatible operator \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) | (?: # All other operators only allow a sub set of what the # (non)equality operators do. Specifically they do not allow # local versions to be specified nor do they allow the prefix # matching wild cards. (?".format(str(self)) PK!i|poetry/version/utils.pyclass Infinity(object): def __repr__(self): return "Infinity" def __hash__(self): return hash(repr(self)) def __lt__(self, other): return False def __le__(self, other): return False def __eq__(self, other): return isinstance(other, self.__class__) def __ne__(self, other): return not isinstance(other, self.__class__) def __gt__(self, other): return True def __ge__(self, other): return True def __neg__(self): return NegativeInfinity Infinity = Infinity() class NegativeInfinity(object): def __repr__(self): return "-Infinity" def __hash__(self): return hash(repr(self)) def __lt__(self, other): return True def __le__(self, other): return True def __eq__(self, other): return isinstance(other, self.__class__) def __ne__(self, other): return not isinstance(other, self.__class__) def __gt__(self, other): return False def __ge__(self, other): return False def __neg__(self): return Infinity NegativeInfinity = NegativeInfinity() PK!;poetry/version/version.pyimport re from collections import namedtuple from itertools import dropwhile from .base import BaseVersion from .exceptions import InvalidVersion from .utils import Infinity _Version = namedtuple("_Version", ["epoch", "release", "dev", "pre", "post", "local"]) VERSION_PATTERN = re.compile( """ ^ v? (?: (?:(?P[0-9]+)!)? 
# epoch (?P[0-9]+(?:\.[0-9]+)*) # release segment (?P
                                          # pre-release
            [-_.]?
            (?P(a|b|c|rc|alpha|beta|pre|preview))
            [-_.]?
            (?P[0-9]+)?
        )?
        (?P                                         # post release
            (?:-(?P[0-9]+))
            |
            (?:
                [-_.]?
                (?Ppost|rev|r)
                [-_.]?
                (?P[0-9]+)?
            )
        )?
        (?P                                          # dev release
            [-_.]?
            (?Pdev)
            [-_.]?
            (?P[0-9]+)?
        )?
    )
    (?:\+(?P[a-z0-9]+(?:[-_.][a-z0-9]+)*))?       # local version
    $
""",
    re.IGNORECASE | re.VERBOSE,
)


class Version(BaseVersion):
    """A parsed PEP 440 version.

    Splits the version string into (epoch, release, pre, post, dev, local)
    components and precomputes a sort key so that instances order according
    to PEP 440 (the comparison operators themselves come from
    ``BaseVersion``).

    :raises InvalidVersion: if ``version`` does not match ``VERSION_PATTERN``.
    """

    def __init__(self, version):
        # Validate the version and parse it into pieces
        match = VERSION_PATTERN.match(version)
        if not match:
            raise InvalidVersion("Invalid version: '{0}'".format(version))

        # Store the parsed out pieces of the version.
        # An absent epoch defaults to 0; pre/post/dev letters are normalized
        # by _parse_letter_version ("alpha" -> "a", "rev" -> "post", ...).
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
            post=_parse_letter_version(
                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
            ),
            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        # BUG FIX: the format string was empty ("".format(...)), so repr()
        # always produced "". Restored the "<Version('...')>" form used by
        # the upstream `packaging` implementation this file derives from.
        return "<Version({0})>".format(repr(str(self)))

    def __str__(self):
        """Return the normalized PEP 440 string form of this version."""
        parts = []

        # Epoch (omitted when 0, the default)
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release, e.g. "rc1"
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release, e.g. ".post2"
        if self._version.post is not None:
            parts.append(".post{0}".format(self._version.post[1]))

        # Development release, e.g. ".dev3"
        if self._version.dev is not None:
            parts.append(".dev{0}".format(self._version.dev[1]))

        # Local version segment, e.g. "+ubuntu.1"
        if self._version.local is not None:
            parts.append("+{0}".format(".".join(str(x) for x in self._version.local)))

        return "".join(parts)

    @property
    def public(self):
        """The version without its local segment (everything before "+")."""
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        """Epoch and release only: no pre/post/dev/local segments."""
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        """The local segment as a string, or None when absent."""
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        # A dev release also counts as a pre-release per PEP 440 ordering.
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        return bool(self._version.post)


def _parse_letter_version(letter, number):
    if letter:
        # We consider there to be an implicit 0 in a pre-release if there is
        # not a numeral associated with it.
        if number is None:
            number = 0

        # We normalize any letters to their lower case form
        letter = letter.lower()

        # We consider some words to be alternate spellings of other words and
        # in those cases we want to normalize the spellings to our preferred
        # spelling.
        if letter == "alpha":
            letter = "a"
        elif letter == "beta":
            letter = "b"
        elif letter in ["c", "pre", "preview"]:
            letter = "rc"
        elif letter in ["rev", "r"]:
            letter = "post"

        return letter, int(number)
    if not letter and number:
        # We assume if we are given a number, but we are not given a letter
        # then this is using the implicit post release syntax (e.g. 1.0-1)
        letter = "post"

        return letter, int(number)


_local_version_seperators = re.compile(r"[._-]")


def _parse_local_version(local):
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _local_version_seperators.split(local)
        )


def _cmpkey(epoch, release, pre, post, dev, local):
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    # leading zeros until we come to something non zero, then take the rest
    # re-reverse it back into the correct order and make it a tuple and use
    # that for our sorting key.
    release = tuple(reversed(list(dropwhile(lambda x: x == 0, reversed(release)))))

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        pre = -Infinity

    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        pre = Infinity

    # Versions without a post segment should sort before those with one.
    if post is None:
        post = -Infinity

    # Versions without a development segment should sort after those with one.
    if dev is None:
        dev = Infinity

    if local is None:
        # Versions without a local segment should sort before those with one.
        local = -Infinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alpha numeric segments sort before numeric segments
        # - Alpha numeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        local = tuple((i, "") if isinstance(i, int) else (-Infinity, i) for i in local)

    return epoch, release, pre, post, dev, local
PK!b{"poetry/version/version_selector.pyfrom typing import Union

from poetry.packages import Package
from poetry.semver import parse_constraint
from poetry.semver import Version


class VersionSelector(object):
    """Picks the best package candidate from a pool and recommends
    caret version constraints for it."""

    def __init__(self, pool):
        self._pool = pool

    def find_best_candidate(
        self,
        package_name,  # type: str
        target_package_version=None,  # type:  Union[str, None]
        allow_prereleases=False,  # type: bool
    ):  # type: (...) -> Union[Package, bool]
        """
        Given a package name and optional version,
        returns the latest Package that matches.

        Returns ``False`` when no (allowed) candidate exists.
        """
        if target_package_version:
            constraint = parse_constraint(target_package_version)
        else:
            constraint = None

        candidates = self._pool.find_packages(
            package_name, constraint, allow_prereleases=allow_prereleases
        )

        if not candidates:
            return False

        # Select the highest allowed version among the candidates.
        # BUG FIX: the first candidate used to be taken as the baseline
        # *before* the pre-release filter ran, so a pre-release could be
        # returned even with allow_prereleases=False.
        package = None
        for candidate in candidates:
            if candidate.is_prerelease() and not allow_prereleases:
                continue

            if package is None or package.version < candidate.version:
                package = candidate

        if package is None:
            # Every candidate was a disallowed pre-release.
            return False

        return package

    def find_recommended_require_version(self, package):
        """Return the recommended constraint (e.g. ``^2.1``) for *package*."""
        version = package.version

        return self._transform_version(version.text, package.pretty_version)

    def _transform_version(self, version, pretty_version):
        # Attempt to transform 2.1.1 into ^2.1: dropping the patch number
        # allows callers to upgrade through minor versions.
        try:
            parsed = Version.parse(version)
            parts = [parsed.major, parsed.minor, parsed.patch]
        except ValueError:
            # Not a semver-looking version: keep the upstream pretty form.
            return pretty_version

        # Remove the patch component, except for 0.x versions where the
        # minor component carries compatibility meaning.
        # (The previous `len(parts) == 3` guard was dead code: `parts` is
        # always a three-element list by construction.)
        if parts[0] != 0:
            del parts[2]

        version = ".".join(str(p) for p in parts)
        if parsed.is_prerelease():
            version += "-{}".format(".".join(str(p) for p in parsed.prerelease))

        return "^{}".format(version)
PK!Hi&.(poetry-0.10.2.dist-info/entry_points.txtN+I/N.,()*O-)PzPi<..PK!5&&poetry-0.10.2.dist-info/LICENSECopyright (c) 2018 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
PK!H$8XVXpoetry-0.10.2.dist-info/WHEEL
A
 н#Z;/"
bFF]xzwK;<*mTֻ`VJs)猶j>GbaPn>PK!H+Ye4fpZ poetry-0.10.2.dist-info/METADATA\FvhR1ɒh#Kz*d6Ix@PT<*/țIs
-'.z\siuyV8gzZD-&/&ZZBh6R^V#(MUlj(x&:_zUjԎ8hgu>nr.qU:WdDjqTng^6ij8<'épOi1YԷrYzB߱eYx :=YP{C^|:჏q{㳏q{tX	,*.zouV,C&&hVdu|˸Lu-E1i2]4Jd""j947x`kiwJζ[dA0z}cjx4*
ZF[2TI?%Sͭh
{H>R=-d׸\/cOw,q6UM%$';q+!ma
Ň98G],2ib4;9#bc:m?zOuƳ0?czBfR ^AĜyڑ)'\I[5fzߚojaQΎIZ]uiKu;;3\gJ+ cxSi5m$J:t:Aeޔt4<ժkUyMKnF^uCo2A2^7e"bЮoVRy]`f,JY,N~KsN8J'fXbaK)*A,a"YKҪ@zLKtCRVH5ֺP㵺!2}GJ:z=O+E%bA/'ji,3-3c+Qσ)35.3]
,'4E<"`>$fE<]G"Eɮꤩ53)
vYS2 %HU*K2UDnShEZvTwl2]bڿXUKF&vq@hZys[FKVlj.#ZTgqC:u_y/q+y"SyXwnG?:x
(	zZ`Rq``U0[\w	ںYL
5=tF_^! .[}Dd Xdpaམ,CŠuam`fY1&<σ\'²:N1<DUd
[C+2%
GE0-4ʓ")Ť:r~clO?a0HOhjs'kнd{O-qnxT&@Ƒc%wFGP5ic<#u?NHNI$᧑U\5ڹ*lZ`/|h刉-NgM)}O&U6НHBѕa
U"$2CST˯_r4
p纾[1/	cGiI!*+BtͪsG B;z.)JB=Z]&"9e9.?9?>gr8vSILi@H戫U飭ޖ0[̸(1G;z|96^p`9A|P[ye9 Zl5H'2dbw}-t@sxC\df-XT12DIZU
E/U	OI</Ag.=i=F.0k-1+(nuK\2@Yfg?
N?޿c$ƞqOLXWrZ=Sn
$
z
vvF+0^[&Bqִ9#٦SG_˒])%Z+vD[*,qXm̐1lʴgm	jyZmv'଍2Z;Rғ&oh3Kc2pr=,M-`lFh-dnDŸ)kx*) L?˨v|j:T" wSkCN6Zpۘ
DKa #VbQmf:7pmq꺵6oJ'E'+GQWGif綊c-gj컌+頬{*D]MRU}/!WќJtoBt{¯#U3Q܊	-w1sV*3J
vD\eď?_*DjWs3Jpz4ġR;z`B҂%&WJ>~24\ZPKB?< '&8cGLqé4O\ 򲁠W\r(S{UGpjT(RK11Aue%IdQcB
`'r2As,:bFBƵ8,$`ʧVPQ76fa%DMә]ھveF̷apǺzBYV[oJ-i52ç}az@Aa(A_Haɵ
p7<(zn8LmH"64`dr
"f&1#$䚜A=kO^7\?-]mk(ܥ|qy$G2	]V!;ǎCCFHK
?!wJ-}{6%~?2J>[w`vI*/nL4*{BfjhRPb3o$(*lu;=[VfVe}K'x+u<ӄ
YbloXI˽c_~q =#%fGDJg=>~ paux(50\V#;a&EtDM潽
ePTj ާxzZE>|BMRNvE9P4?KqʢYX?RA.o$PQKFk&
Gؒ6;8^슏;Dg&~1u_g?-j[TݞX 

ENy6|8FewnV,ܙ,av
K
.?]YkV%/N35:x.}|(E}|> ~G^/qx_	<7[?=;.`kIE:GdÇt$<4VI7h7Cֳ;/ڋr&ḸJm4btS|㈍%>xѹ}IPȃ3o/{Q-Ձ0Z߆B)gd	>sPcu^)ޠTPȤ]8Yt*HmqF[^$%c	?BFKĶoP7[oם{x{XWU~J*M]ɇ,LZ~$ז#{.xSu
[τcqJ}Kb[tc`U)K"Xgqn6Of\퓏c{7۝Hh5!z7:ϱ1i^w&sKeMRUz%c
\6E_v~ ;vsWG?6+a=4l\qq^;v.,ynaDఀS6cot'0gqDž𞹰f\C[#b"v"Y&jc"L_S`2uua~04HKlx8e:Iv mcz4[qH&]@_$/u!*s7RԛX+Eyw)l{O94mʫ_A0{U'w;,7t(v
lRd?/ ֲZ'r'{zk$/n>>ZFb܃s8~C${b4L钠R[]Z"F!HY3CqZ3t1m̑~
K~\EPVy>(59)~eXGb6s!)Z0elSfzŘN}smDk	@8+f".+0`{f$o{%ۺz%)ǫo*^^4n'SK.{aW
A/PK!H&˂l?poetry-0.10.2.dist-info/RECORDrȲYȽAOxok-QqL$
gV¾]8IzuB-LAzDw96B*iAԿIH'iSkH܁x)e2^b;a%gP.uZ|L':\t6ĨAA?q'qYoމi?
^t6r=txtһ%<*$~Vl4l[3Dx_cWthKf0
% D_@]Op!Q5]c5_02yپ^%}u'o{a*5QuG쨖y_05CWRmzŨW_[ݻV`Tb@J~sMM:N\wj⍪ۡ#"/spUrh~ᱭXKT8dk6"
8Z[Bm~L?G^H4qhv4ْCZ;<`ޫ!{ΖGvDkhgn[^+2m0&N"˜ :eNa$p$$7ֲjz9y.sHSWkF‚#N;gOʨzaxj[+#	l
q10tH	[xQ)xjPlKU|ΰJGZs.=A	լ@/R}<}s_ؿ:Oai`A78ie'[PX!a~X?no*#SD:@T*F^uoA?}_/\fzzlMQ|/]pqSt`-s$|_5䠏\,FqSA4CئGW'{[;j$I%Nelx:	{ԎV)N618\VUvmtY"+&g]s_-mJE{;a^'فz)W
l'7vG`6zu+ȏT~A9Y
}u})W%钗:ՏI^z"O؅9%sȲ`t+p-cSu'mq^Qx3OX6n\0*맹TpX;iI;HiSW/X=n`y6<ˠmp\yٲ7kf	\sr4zf,>	S~{ͳm_>ʥ&ԘlKwק!ɕVhZٖd{!5ԡu+OP1LF4_dX{>W_uձ)~5|VE՗9H],	nuGY**9 LJxUl__p1b2i+*SXrpP1- v0P# -$;9SttNk7SanB}
ҒUؑ`eƻ(ޛoDxbePiꙤ|[Od-zӹ(qS%u)6tY%a;ޫuk;1
c]-}ă7+QݬoU-ڍ&EyW;31F_ifO;zsWh!)hߦ3?52m#JHmU0{uYTF9;>Yc|C$n:UM%Gc[D6W`=ϧy4	T&sWkgԩպuWSH]sd-r!#hGoH뒸,u#=v;NAAx(^Gp=><%\:oM}5}!}nw%Q9SR|'T	6A4v$|G&3mK-"lYG곩}r/uI/ȆpZPa?ma}?Dndڽ )ꢰ+-CC(Trl/\}V_:rīZWД$
'L2öM,i/	7
!]=G_#͎u=u?O~}{ S@S$`;{
G_LQ2
n~`Ѭ޷+R-hŗ>635p:yQ~(2M֌DTBsQ=*
X!?BXύn?ؗ@+\hG^Lo2l@vDŽkrd^nFɬb	PMglbDq+UspyGs_VE4PzLK5$Q9-#ǃߍuXaFvN\15Wj>dvyȣشv[F8P6]|Z{ٸ8xɋGcϦzmjM+PrɎA?J=:<{ LY8&_~QEzWW k2{KoݳI)<,CFMrGuWzWpe{ᵑ}꽣vUt$Na![=Dw񎄱*|Gz~XGջz&\R됿 
L<&ȁc_yR@ wQ: @kTKOQB+e*e%󓔮t?cTqRL*E:\VO%9-k~w;%9@z\ɜI]7vw*~lPs܌f'mk]n/pi%q
7ژm%j(U%ӔHNi}~!MݠxgdzlH9eŠʉ8	 ?}TOFKUgHٝ}=ҳͦ1]aW뾠||Qz*\:F5lmό#~KR9aUp<.bZM[O{9I}zn!g
QX;0^U:y]
|u-:2{A<شK7RI[߄nܾ\NBlsuUz2LxPU:[;Y uؔq]3ss2C`Rr g%,
Iߢ+[ޙa*qizױ>[0D`<`㬶͟9qGFRQC~?t8%RA}580Hj$tֺ0%E6U۷ר˙
\V
Oٳ+FuOFЏ>ĵ.R28RLŵwLXB0~sR`57=¤1",Z%>ӣa:yš܇]ю(pIt;amfaq^&6	z#6Qh]9y7uV}R,[$#v0b'/[+W5J<{&*iCg+r8cݺw{}fF(wÔr!fzhiA$V.kDӜCppt)ƼtN#śD?r_d-f
ETF&vq/dԆݣwOς=ڶ%|MS
N
L'M@Xa}Ld:-X2ꃊ/l:`dH/o]a''j|T~j*|.Ǡ+96Gv'L&ZI-G
ORH<(A8Kwm>Ddžylcm
LfQoC]'p%e`\:{uv?0F|"U(iP{my&i/<(r.bl3(D7y%H2R;@>5vyz
4#ϒy 8J}v]Z0B`#MQv {x*hW>t/Q{7רh%+n/7c8/~ضէs+b]ɔ:#tKh[Hf"6B*FGEx{jjJ.j1BJ8t,)
mr+ï>4I#,e#[i#<؃rj"Wgغys
!Tr#:s'z9/#|}:+ƶU#I7YYH7-7"$jE3>XFxmr}xص[{WqHYL@I|Dm:fph{vE>zgZtaOAX>u2b3|!A!izD#Ia–S3qEJ`V
)ܦP^[Ua%c;e=휉xJ;~v	,ֻbY(yGRh
98~/
Id[}+IXqgS2p$F[V8_ =yT5{{PG60KOѓ%gzmZb>
;zdE{ ul'
&a57hӜϲ	l~X_o"߷c/1$͂S#2ǜ-wSz>	?_Ο߳xu|OЦCt▁<ݲ*78>/g4)4z_N.)Ä
l5Ru$`0kRxn*vӱ!|uu,zwqa+*T1zJxTk#
[A{[8$>}DfQ+$U	/@qi֢c2Dlo!\o`{EA.nH
M}SO%'WΜ%bfMKneh#5U0RB	r
x
WZlZix1Q7f؉wH޴
p*}P6m_ݻy31Q#S$tBw.ؑ7zWѸJ֋4yf ';r#{"kKYWlY>Bš TBv@<v+EK[Y|E~FBf(w퍡os^#3qٽWqZS8E"a=)P=\*Xo3-6<}Lx@HCx$IΕ'#[}G>e%؆͐˩W\Hi=ϓGȫCeR#wV__O#3x8؀D+'r:>9y{CMy	)2HD&|wrE<jnf"&&UkÉJEB߻>Zrh;Ʉ@?;ow3j8u~pWIP1q#>9y-!
n6~ğ{?EXK=Pzx J+sঽc_	zP?C'kYc
GHfbZ)qe}G|&iG}gJN1NƂR<®q|;s=4%GL.S}Mm'l٪AV?PK!G܍)poetry/__init__.pyPK!);[[poetry/__main__.pyPK!Spoetry/__version__.pyPK!뚩poetry/_vendor/.gitignorePK!2LCC-poetry/config.pyPK!RR
poetry/console/__init__.pyPK!
+q(poetry/console/application.pyPK!*HH#D!poetry/console/commands/__init__.pyPK!Rs߯ #poetry/console/commands/about.pyPK!+C%poetry/console/commands/add.pyPK!Nj} 9poetry/console/commands/build.pyPK!Hu%%)s<poetry/console/commands/cache/__init__.pyPK!Ց&<poetry/console/commands/cache/clear.pyPK!FF Epoetry/console/commands/check.pyPK!1"Q	Q	"8Gpoetry/console/commands/command.pyPK!-3!Ppoetry/console/commands/config.pyPK!LL)mpoetry/console/commands/debug/__init__.pyPK!NP![%npoetry/console/commands/debug/info.pyPK!~Ӛ,,(gtpoetry/console/commands/debug/resolve.pyPK!X466"قpoetry/console/commands/develop.pyPK!>(>(Opoetry/console/commands/init.pyPK!'GEE"ʰpoetry/console/commands/install.pyPK!ROpoetry/console/commands/lock.pyPK!I
poetry/console/commands/new.pyPK!2"poetry/console/commands/publish.pyPK!J		!poetry/console/commands/remove.pyPK!35poetry/console/commands/run.pyPK!!F!poetry/console/commands/script.pyPK!@zII!poetry/console/commands/search.pyPK!Z&&(Gpoetry/console/commands/self/__init__.pyPK!Ez&poetry/console/commands/self/update.pyPK!b(,,poetry/console/commands/show.pyPK!?X!'poetry/console/commands/update.pyPK!cضOO'.,poetry/console/commands/venv_command.pyPK!<		".poetry/console/commands/version.pyPK!!8poetry/console/styles/__init__.pyPK!J<8poetry/console/styles/poetry.pyPK!=N2cc<poetry/exceptions.pyPK!!!=poetry/installation/__init__.pyPK!q%=poetry/installation/base_installer.pyPK!߁2F2F ?poetry/installation/installer.pyPK!umm%{poetry/installation/noop_installer.pyPK!gmZJ$+poetry/installation/pip_installer.pyPK!Ypoetry/io/__init__.pyPK!}dpoetry/io/null_io.pyPK!66;poetry/io/raw_argv_input.pyPK!poetry/json/__init__.pyPK!`--&ߘpoetry/json/schemas/poetry-schema.jsonPK!FFpoetry/layouts/__init__.pyPK!

<poetry/layouts/layout.pyPK!
ѺUpoetry/layouts/src.pyPK!ѐpoetry/layouts/standard.pyPK!B)&*poetry/locations.pyPK!ΨDypoetry/masonry/__init__.pyPK!2poetry/masonry/api.pyPK!O366poetry/masonry/builder.pyPK!i>$ff#&poetry/masonry/builders/__init__.pyPK!R&"poetry/masonry/builders/builder.pyPK!n[[#poetry/masonry/builders/complete.pyPK!)/*/* Rpoetry/masonry/builders/sdist.pyPK!7X++ 'poetry/masonry/builders/wheel.pyPK!J	J	Spoetry/masonry/metadata.pyPK!5k!!%i]poetry/masonry/publishing/__init__.pyPK!%8TT&]poetry/masonry/publishing/publisher.pyPK!I0"0"%eipoetry/masonry/publishing/uploader.pyPK! ؋poetry/masonry/utils/__init__.pyPK!ƨC88poetry/masonry/utils/helpers.pyPK!,(Ϣpoetry/masonry/utils/module.pyPK!yP<<ipoetry/masonry/utils/tags.pyPK!߬poetry/mixology/__init__.pyPK!Jpoetry/mixology/assignment.pyPK!;{%%@poetry/mixology/failure.pyPK!!;;"$poetry/mixology/incompatibility.pyPK!ꗺ(Bpoetry/mixology/incompatibility_cause.pyPK!m0#Bpoetry/mixology/partial_solution.pyPK!⟶eeJ9poetry/mixology/result.pyPK!:m	:poetry/mixology/set_relation.pyPK!]}((;poetry/mixology/term.pyPK!EE!.Spoetry/mixology/version_solver.pyPK!rR:poetry/packages/__init__.pyPK!'poetry/packages/constraints/__init__.pyPK!252.ҫpoetry/packages/constraints/base_constraint.pyPK!q/ƭpoetry/packages/constraints/empty_constraint.pyPK!"31ܯpoetry/packages/constraints/generic_constraint.pyPK!a/9poetry/packages/constraints/multi_constraint.pyPK!-EE2poetry/packages/constraints/wildcard_constraint.pyPK!2-t""#poetry/packages/dependency.pyPK!rjii'_poetry/packages/directory_dependency.pyPK!h["
poetry/packages/file_dependency.pyPK!ش	poetry/packages/locker.pyPK!OA$$!poetry/packages/package.pyPK!zm"Fpoetry/packages/project_package.pyPK!!Gpoetry/packages/utils/__init__.pyPK!1&Hpoetry/packages/utils/link.pyPK!)~
~
OZpoetry/packages/utils/utils.pyPK!ٺg!	hpoetry/packages/vcs_dependency.pyPK!)npoetry/poetry.pyPK!-;}Spoetry/puzzle/__init__.pyPK!#poetry/puzzle/dependencies.pyPK!Npoetry/puzzle/exceptions.pyPK!,MYY$poetry/puzzle/operations/__init__.pyPK!h9ii#6poetry/puzzle/operations/install.pyPK!ť%poetry/puzzle/operations/operation.pyPK!^Ntt%poetry/puzzle/operations/uninstall.pyPK!Y%{ZZ"poetry/puzzle/operations/update.pyPK![11Upoetry/puzzle/provider.pyPK!8kW  (poetry/puzzle/solver.pyPK!y::}poetry/repositories/__init__.pyPK!
uKK&poetry/repositories/base_repository.pyPK!Znn+poetry/repositories/installed_repository.pyPK!'VE/(/((:poetry/repositories/legacy_repository.pyPK!41jSsspoetry/repositories/pool.pyPK! m7>7>&[#poetry/repositories/pypi_repository.pyPK!-NN!apoetry/repositories/repository.pyPK!t'cnpoetry/semver/__init__.pyPK!Du-22!lpoetry/semver/empty_constraint.pyPK!ɕ݁poetry/semver/patterns.pyPK! ++poetry/semver/version.pyPK!#OO#poetry/semver/version_constraint.pyPK!^22poetry/semver/version_range.pyPK!FFhpoetry/semver/version_union.pyPK!A
poetry/spdx/__init__.pyPK!=|uupoetry/spdx/data/licenses.jsonPK!ja~poetry/spdx/license.pyPK!XI.ԓpoetry/spdx/updater.pyPK!kpoetry/toml/__init__.pyPK!	Ypoetry/toml/array.pyPK!goLpoetry/toml/cascadedict.pyPK!oypoetry/toml/freshtable.pyPK!ypoetry/toml/peekableit.pyPK!M Qpoetry/toml/prettify/__init__.pyPK!s
(7 Gpoetry/toml/prettify/_version.pyPK!css)poetry/toml/prettify/elements/__init__.pyPK!&		.Rpoetry/toml/prettify/elements/abstracttable.pyPK!:10&Rpoetry/toml/prettify/elements/array.pyPK!++'poetry/toml/prettify/elements/atomic.pyPK!E]

'poetry/toml/prettify/elements/common.pyPK!o敩``'zpoetry/toml/prettify/elements/errors.pyPK!t::(poetry/toml/prettify/elements/factory.pyPK!SX,poetry/toml/prettify/elements/inlinetable.pyPK!kD
D
)poetry/toml/prettify/elements/metadata.pyPK!|&Mpoetry/toml/prettify/elements/table.pyPK!T,H(poetry/toml/prettify/elements/tableheader.pyPK!37poetry/toml/prettify/elements/traversal/__init__.pyPK!|Zff5Rpoetry/toml/prettify/elements/traversal/predicates.pyPK!yXpoetry/toml/prettify/errors.pyPK!&Zpoetry/toml/prettify/lexer/__init__.pyPK!P]'''mpoetry/toml/prettify/parser/__init__.pyPK!OpPXX/%qpoetry/toml/prettify/parser/elementsanitizer.pyPK!:*%ypoetry/toml/prettify/parser/errors.pyPK!w88%{poetry/toml/prettify/parser/parser.pyPK!&poetry/toml/prettify/parser/recdesc.pyPK!7WW*}poetry/toml/prettify/parser/tokenstream.pyPK!̚'poetry/toml/prettify/tokens/__init__.pyPK!V%poetry/toml/prettify/tokens/errors.pyPK!)Mw&poetry/toml/prettify/tokens/py2toml.pyPK!i1&Bpoetry/toml/prettify/tokens/toml2py.pyPK!oSpoetry/toml/prettify/util.pyPK!)poetry/toml/raw.pyPK!t=
poetry/toml/structurer.pyPK!p((((poetry/toml/toml_file.pyPK!;?ppOFpoetry/toml/toplevels.pyPK!Tpoetry/utils/__init__.pyPK!iB+Upoetry/utils/_compat.pyPK!