diff --git a/pyproject.toml b/pyproject.toml index 638d4ce8..7addf889 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,6 +71,14 @@ typeCheckingMode = "basic" reportMissingImports = "none" reportMissingModuleSource = "none" reportUnusedImport = "error" +# Disable common issues in existing codebase - can be enabled incrementally +reportGeneralTypeIssues = "none" +reportOptionalMemberAccess = "none" +reportOptionalSubscript = "none" +reportOptionalCall = "none" +reportOptionalIterable = "none" +reportUnboundVariable = "warning" +reportUnusedExpression = "none" include = ["stack_orchestrator/**/*.py", "tests/**/*.py"] exclude = ["**/build/**", "**/__pycache__/**"] diff --git a/setup.py b/setup.py index ace0d536..b295802f 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,7 @@ -# See https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78 +# See +# https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78 from setuptools import setup, find_packages + with open("README.md", "r", encoding="utf-8") as fh: long_description = fh.read() with open("requirements.txt", "r", encoding="utf-8") as fh: @@ -7,26 +9,26 @@ with open("requirements.txt", "r", encoding="utf-8") as fh: with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh: version = fh.readlines()[-1].strip(" \n") setup( - name='laconic-stack-orchestrator', + name="laconic-stack-orchestrator", version=version, - author='Cerc', - author_email='info@cerc.io', - license='GNU Affero General Public License', - description='Orchestrates deployment of the Laconic stack', + author="Cerc", + author_email="info@cerc.io", + license="GNU Affero General Public License", + description="Orchestrates deployment of the Laconic stack", long_description=long_description, long_description_content_type="text/markdown", - url='https://git.vdb.to/cerc-io/stack-orchestrator', - py_modules=['stack_orchestrator'], + url="https://git.vdb.to/cerc-io/stack-orchestrator", + py_modules=["stack_orchestrator"], packages=find_packages(), install_requires=[requirements], - python_requires='>=3.7', + python_requires=">=3.7", include_package_data=True, - package_data={'': ['data/**']}, + package_data={"": ["data/**"]}, classifiers=[ "Programming Language :: Python :: 3.8", "Operating System :: OS Independent", ], entry_points={ - 'console_scripts': ['laconic-so=stack_orchestrator.main:cli'], - } + "console_scripts": ["laconic-so=stack_orchestrator.main:cli"], + }, ) diff --git a/stack_orchestrator/base.py b/stack_orchestrator/base.py index 811d085d..e60db556 100644 --- a/stack_orchestrator/base.py +++ b/stack_orchestrator/base.py @@ -27,7 +27,6 @@ def get_stack(config, stack): class base_stack(ABC): - def __init__(self, config, stack): self.config = config self.stack = stack @@ -42,14 +41,16 @@ class base_stack(ABC): class package_registry_stack(base_stack): - def ensure_available(self): self.url = "" # Check if we were given an external registry URL url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL") if url_from_environment: if self.config.verbose: - print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}") + print( + f"Using package registry url from CERC_NPM_REGISTRY_URL: " + f"{url_from_environment}" + ) self.url = url_from_environment else: # Otherwise we expect to use the local package-registry stack @@ -62,10 +63,16 @@ class package_registry_stack(base_stack): # TODO: get url from deploy-stack self.url = 
"http://gitea.local:3000/api/packages/cerc-io/npm/" else: - # If not, print a message about how to start it and return fail to the caller - print("ERROR: The package-registry stack is not running, and no external registry " - "specified with CERC_NPM_REGISTRY_URL") - print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up") + # If not, print a message about how to start it and return fail to the + # caller + print( + "ERROR: The package-registry stack is not running, " + "and no external registry specified with CERC_NPM_REGISTRY_URL" + ) + print( + "ERROR: Start the local package registry with: " + "laconic-so --stack package-registry deploy-system up" + ) return False return True @@ -76,7 +83,9 @@ class package_registry_stack(base_stack): def get_npm_registry_url(): # If an auth token is not defined, we assume the default should be the cerc registry # If an auth token is defined, we assume the local gitea should be used. - default_npm_registry_url = "http://gitea.local:3000/api/packages/cerc-io/npm/" if config( - "CERC_NPM_AUTH_TOKEN", default=None - ) else "https://git.vdb.to/api/packages/cerc-io/npm/" + default_npm_registry_url = ( + "http://gitea.local:3000/api/packages/cerc-io/npm/" + if config("CERC_NPM_AUTH_TOKEN", default=None) + else "https://git.vdb.to/api/packages/cerc-io/npm/" + ) return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url) diff --git a/stack_orchestrator/build/build_containers.py b/stack_orchestrator/build/build_containers.py index 2b78306b..4717b7a6 100644 --- a/stack_orchestrator/build/build_containers.py +++ b/stack_orchestrator/build/build_containers.py @@ -18,7 +18,8 @@ # env vars: # CERC_REPO_BASE_DIR defaults to ~/cerc -# TODO: display the available list of containers; allow re-build of either all or specific containers +# TODO: display the available list of containers; +# allow re-build of either all or specific containers import os import sys @@ -34,14 +35,17 @@ from stack_orchestrator.build.publish import publish_image from stack_orchestrator.build.build_util import get_containers_in_scope # TODO: find a place for this -# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)" +# epilog="Config provided either in .env or settings.ini or env vars: +# CERC_REPO_BASE_DIR (defaults to ~/cerc)" -def make_container_build_env(dev_root_path: str, - container_build_dir: str, - debug: bool, - force_rebuild: bool, - extra_build_args: str): +def make_container_build_env( + dev_root_path: str, + container_build_dir: str, + debug: bool, + force_rebuild: bool, + extra_build_args: str, +): container_build_env = { "CERC_NPM_REGISTRY_URL": get_npm_registry_url(), "CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""), @@ -50,11 +54,15 @@ def make_container_build_env(dev_root_path: str, "CERC_CONTAINER_BASE_DIR": container_build_dir, "CERC_HOST_UID": f"{os.getuid()}", "CERC_HOST_GID": f"{os.getgid()}", - "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0") + "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0"), } container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {}) - container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {}) + container_build_env.update( + {"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} + if extra_build_args + else {} + ) docker_host_env = 
os.getenv("DOCKER_HOST") if docker_host_env: container_build_env.update({"DOCKER_HOST": docker_host_env}) @@ -67,12 +75,18 @@ def process_container(build_context: BuildContext) -> bool: print(f"Building: {build_context.container}") default_container_tag = f"{build_context.container}:local" - build_context.container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag}) + build_context.container_build_env.update( + {"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag} + ) # Check if this is in an external stack if stack_is_external(build_context.stack): - container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build") - temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-")) + container_parent_dir = Path(build_context.stack).parent.parent.joinpath( + "container-build" + ) + temp_build_dir = container_parent_dir.joinpath( + build_context.container.replace("/", "-") + ) temp_build_script_filename = temp_build_dir.joinpath("build.sh") # Now check if the container exists in the external stack. if not temp_build_script_filename.exists(): @@ -90,21 +104,34 @@ def process_container(build_context: BuildContext) -> bool: build_command = build_script_filename.as_posix() else: if opts.o.verbose: - print(f"No script file found: {build_script_filename}, using default build script") - repo_dir = build_context.container.split('/')[1] - # TODO: make this less of a hack -- should be specified in some metadata somewhere - # Check if we have a repo for this container. If not, set the context dir to the container-build subdir + print( + f"No script file found: {build_script_filename}, " + "using default build script" + ) + repo_dir = build_context.container.split("/")[1] + # TODO: make this less of a hack -- should be specified in + # some metadata somewhere. Check if we have a repo for this + # container. If not, set the context dir to container-build subdir repo_full_path = os.path.join(build_context.dev_root_path, repo_dir) - repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir - build_command = os.path.join(build_context.container_build_dir, - "default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}" + repo_dir_or_build_dir = ( + repo_full_path if os.path.exists(repo_full_path) else build_dir + ) + build_command = ( + os.path.join(build_context.container_build_dir, "default-build.sh") + + f" {default_container_tag} {repo_dir_or_build_dir}" + ) if not opts.o.dry_run: # No PATH at all causes failures with podman. 
if "PATH" not in build_context.container_build_env: build_context.container_build_env["PATH"] = os.environ["PATH"] if opts.o.verbose: - print(f"Executing: {build_command} with environment: {build_context.container_build_env}") - build_result = subprocess.run(build_command, shell=True, env=build_context.container_build_env) + print( + f"Executing: {build_command} with environment: " + f"{build_context.container_build_env}" + ) + build_result = subprocess.run( + build_command, shell=True, env=build_context.container_build_env + ) if opts.o.verbose: print(f"Return code is: {build_result.returncode}") if build_result.returncode != 0: @@ -117,33 +144,61 @@ def process_container(build_context: BuildContext) -> bool: @click.command() -@click.option('--include', help="only build these containers") -@click.option('--exclude', help="don\'t build these containers") -@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild") +@click.option("--include", help="only build these containers") +@click.option("--exclude", help="don't build these containers") +@click.option( + "--force-rebuild", + is_flag=True, + default=False, + help="Override dependency checking -- always rebuild", +) @click.option("--extra-build-args", help="Supply extra arguments to build") -@click.option("--publish-images", is_flag=True, default=False, help="Publish the built images in the specified image registry") -@click.option("--image-registry", help="Specify the image registry for --publish-images") +@click.option( + "--publish-images", + is_flag=True, + default=False, + help="Publish the built images in the specified image registry", +) +@click.option( + "--image-registry", help="Specify the image registry for --publish-images" +) @click.pass_context -def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_images, image_registry): - '''build the set of containers required for a complete stack''' +def command( + ctx, + include, + exclude, + force_rebuild, + extra_build_args, + publish_images, + image_registry, +): + """build the set of containers required for a complete stack""" local_stack = ctx.obj.local_stack stack = ctx.obj.stack - # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure - container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build") + # See: https://stackoverflow.com/questions/25389095/ + # python-get-path-of-root-project-structure + container_build_dir = ( + Path(__file__).absolute().parent.parent.joinpath("data", "container-build") + ) if local_stack: - dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")] - print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}') + dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")] + print( + f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: " + f"{dev_root_path}" + ) else: - dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc")) + dev_root_path = os.path.expanduser( + config("CERC_REPO_BASE_DIR", default="~/cerc") + ) if not opts.o.quiet: - print(f'Dev Root is: {dev_root_path}') + print(f"Dev Root is: {dev_root_path}") if not os.path.isdir(dev_root_path): - print('Dev root directory doesn\'t exist, creating') + print("Dev root directory doesn't exist, creating") if publish_images: if not image_registry: @@ -151,21 +206,22 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_imag 
containers_in_scope = get_containers_in_scope(stack) - container_build_env = make_container_build_env(dev_root_path, - container_build_dir, - opts.o.debug, - force_rebuild, - extra_build_args) + container_build_env = make_container_build_env( + dev_root_path, + container_build_dir, + opts.o.debug, + force_rebuild, + extra_build_args, + ) for container in containers_in_scope: if include_exclude_check(container, include, exclude): - build_context = BuildContext( stack, container, container_build_dir, container_build_env, - dev_root_path + dev_root_path, ) result = process_container(build_context) if result: @@ -174,10 +230,16 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_imag else: print(f"Error running build for {build_context.container}") if not opts.o.continue_on_error: - error_exit("container build failed and --continue-on-error not set, exiting") + error_exit( + "container build failed and --continue-on-error " + "not set, exiting" + ) sys.exit(1) else: - print("****** Container Build Error, continuing because --continue-on-error is set") + print( + "****** Container Build Error, continuing because " + "--continue-on-error is set" + ) else: if opts.o.verbose: print(f"Excluding: {container}") diff --git a/stack_orchestrator/build/build_npms.py b/stack_orchestrator/build/build_npms.py index c8e3af43..00992546 100644 --- a/stack_orchestrator/build/build_npms.py +++ b/stack_orchestrator/build/build_npms.py @@ -32,14 +32,18 @@ builder_js_image_name = "cerc/builder-js:local" @click.command() -@click.option('--include', help="only build these packages") -@click.option('--exclude', help="don\'t build these packages") -@click.option("--force-rebuild", is_flag=True, default=False, - help="Override existing target package version check -- force rebuild") +@click.option("--include", help="only build these packages") +@click.option("--exclude", help="don't build these packages") +@click.option( + "--force-rebuild", + is_flag=True, + default=False, + help="Override existing target package version check -- force rebuild", +) @click.option("--extra-build-args", help="Supply extra arguments to build") @click.pass_context def command(ctx, include, exclude, force_rebuild, extra_build_args): - '''build the set of npm packages required for a complete stack''' + """build the set of npm packages required for a complete stack""" quiet = ctx.obj.quiet verbose = ctx.obj.verbose @@ -65,45 +69,54 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args): sys.exit(1) if local_stack: - dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")] - print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}') + dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")] + print( + f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: " + f"{dev_root_path}" + ) else: - dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc")) + dev_root_path = os.path.expanduser( + config("CERC_REPO_BASE_DIR", default="~/cerc") + ) build_root_path = os.path.join(dev_root_path, "build-trees") if verbose: - print(f'Dev Root is: {dev_root_path}') + print(f"Dev Root is: {dev_root_path}") if not os.path.isdir(dev_root_path): - print('Dev root directory doesn\'t exist, creating') + print("Dev root directory doesn't exist, creating") os.makedirs(dev_root_path) if not os.path.isdir(dev_root_path): - print('Build root directory doesn\'t exist, creating') + print("Build root directory doesn't exist, creating") 
os.makedirs(build_root_path) # See: https://stackoverflow.com/a/20885799/1701505 from stack_orchestrator import data - with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file: + + with importlib.resources.open_text( + data, "npm-package-list.txt" + ) as package_list_file: all_packages = package_list_file.read().splitlines() packages_in_scope = [] if stack: stack_config = get_parsed_stack_config(stack) # TODO: syntax check the input here - packages_in_scope = stack_config['npms'] + packages_in_scope = stack_config["npms"] else: packages_in_scope = all_packages if verbose: - print(f'Packages: {packages_in_scope}') + print(f"Packages: {packages_in_scope}") def build_package(package): if not quiet: print(f"Building npm package: {package}") repo_dir = package repo_full_path = os.path.join(dev_root_path, repo_dir) - # Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo + # Copy the repo and build that to avoid propagating + # JS tooling file changes back into the cloned repo repo_copy_path = os.path.join(build_root_path, repo_dir) # First delete any old build tree if os.path.isdir(repo_copy_path): @@ -116,41 +129,63 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args): print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}") if not dry_run: copytree(repo_full_path, repo_copy_path) - build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"] + build_command = [ + "sh", + "-c", + "cd /workspace && " + f"build-npm-package-local-dependencies.sh {npm_registry_url}", + ] if not dry_run: if verbose: print(f"Executing: {build_command}") # Originally we used the PEP 584 merge operator: - # envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) - # but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update: - envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token, - "LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml" # Convention used by our web app packages - } + # envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | + # ({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) + # but that isn't available in Python 3.8 (default in Ubuntu 20) + # so for now we use dict.update: + envs = { + "CERC_NPM_AUTH_TOKEN": npm_registry_url_token, + # Convention used by our web app packages + "LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml", + } envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {}) - envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {}) + envs.update( + {"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} + if extra_build_args + else {} + ) try: - docker.run(builder_js_image_name, - remove=True, - interactive=True, - tty=True, - user=f"{os.getuid()}:{os.getgid()}", - envs=envs, - # TODO: detect this host name in npm_registry_url rather than hard-wiring it - add_hosts=[("gitea.local", "host-gateway")], - volumes=[(repo_copy_path, "/workspace")], - command=build_command - ) - # Note that although the docs say that build_result should contain - # the command output as a string, in reality it is always the empty string. - # Since we detect errors via catching exceptions below, we can safely ignore it here. 
+ docker.run( + builder_js_image_name, + remove=True, + interactive=True, + tty=True, + user=f"{os.getuid()}:{os.getgid()}", + envs=envs, + # TODO: detect this host name in npm_registry_url + # rather than hard-wiring it + add_hosts=[("gitea.local", "host-gateway")], + volumes=[(repo_copy_path, "/workspace")], + command=build_command, + ) + # Note that although the docs say that build_result should + # contain the command output as a string, in reality it is + # always the empty string. Since we detect errors via catching + # exceptions below, we can safely ignore it here. except DockerException as e: print(f"Error executing build for {package} in container:\n {e}") if not continue_on_error: - print("FATAL Error: build failed and --continue-on-error not set, exiting") + print( + "FATAL Error: build failed and --continue-on-error " + "not set, exiting" + ) sys.exit(1) else: - print("****** Build Error, continuing because --continue-on-error is set") + print( + "****** Build Error, continuing because " + "--continue-on-error is set" + ) else: print("Skipped") @@ -168,6 +203,12 @@ def _ensure_prerequisites(): # Tell the user how to build it if not images = docker.image.list(builder_js_image_name) if len(images) == 0: - print(f"FATAL: builder image: {builder_js_image_name} is required but was not found") - print("Please run this command to create it: laconic-so --stack build-support build-containers") + print( + f"FATAL: builder image: {builder_js_image_name} is required " + "but was not found" + ) + print( + "Please run this command to create it: " + "laconic-so --stack build-support build-containers" + ) sys.exit(1) diff --git a/stack_orchestrator/build/build_types.py b/stack_orchestrator/build/build_types.py index 6ddbc2ad..53b24932 100644 --- a/stack_orchestrator/build/build_types.py +++ b/stack_orchestrator/build/build_types.py @@ -24,5 +24,5 @@ class BuildContext: stack: str container: str container_build_dir: Path - container_build_env: Mapping[str,str] + container_build_env: Mapping[str, str] dev_root_path: str diff --git a/stack_orchestrator/build/build_util.py b/stack_orchestrator/build/build_util.py index 15be1f9b..a8a0c395 100644 --- a/stack_orchestrator/build/build_util.py +++ b/stack_orchestrator/build/build_util.py @@ -20,21 +20,23 @@ from stack_orchestrator.util import get_parsed_stack_config, warn_exit def get_containers_in_scope(stack: str): - containers_in_scope = [] if stack: stack_config = get_parsed_stack_config(stack) if "containers" not in stack_config or stack_config["containers"] is None: warn_exit(f"stack {stack} does not define any containers") - containers_in_scope = stack_config['containers'] + containers_in_scope = stack_config["containers"] else: # See: https://stackoverflow.com/a/20885799/1701505 from stack_orchestrator import data - with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file: + + with importlib.resources.open_text( + data, "container-image-list.txt" + ) as container_list_file: containers_in_scope = container_list_file.read().splitlines() if opts.o.verbose: - print(f'Containers: {containers_in_scope}') + print(f"Containers: {containers_in_scope}") if stack: print(f"Stack: {stack}") diff --git a/stack_orchestrator/build/build_webapp.py b/stack_orchestrator/build/build_webapp.py index 1021f4bf..f204df82 100644 --- a/stack_orchestrator/build/build_webapp.py +++ b/stack_orchestrator/build/build_webapp.py @@ -18,7 +18,8 @@ # env vars: # CERC_REPO_BASE_DIR defaults to ~/cerc -# TODO: display the available list of 
containers; allow re-build of either all or specific containers +# TODO: display the available list of containers; +# allow re-build of either all or specific containers import os import sys @@ -32,40 +33,55 @@ from stack_orchestrator.build.build_types import BuildContext @click.command() -@click.option('--base-container') -@click.option('--source-repo', help="directory containing the webapp to build", required=True) -@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild") +@click.option("--base-container") +@click.option( + "--source-repo", help="directory containing the webapp to build", required=True +) +@click.option( + "--force-rebuild", + is_flag=True, + default=False, + help="Override dependency checking -- always rebuild", +) @click.option("--extra-build-args", help="Supply extra arguments to build") @click.option("--tag", help="Container tag (default: cerc/:local)") @click.pass_context def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag): - '''build the specified webapp container''' + """build the specified webapp container""" logger = TimedLogger() - quiet = ctx.obj.quiet debug = ctx.obj.debug verbose = ctx.obj.verbose local_stack = ctx.obj.local_stack stack = ctx.obj.stack - # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure - container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build") + # See: https://stackoverflow.com/questions/25389095/ + # python-get-path-of-root-project-structure + container_build_dir = ( + Path(__file__).absolute().parent.parent.joinpath("data", "container-build") + ) if local_stack: - dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")] - logger.log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}') + dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")] + logger.log( + f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: " + f"{dev_root_path}" + ) else: - dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc")) + dev_root_path = os.path.expanduser( + config("CERC_REPO_BASE_DIR", default="~/cerc") + ) if verbose: - logger.log(f'Dev Root is: {dev_root_path}') + logger.log(f"Dev Root is: {dev_root_path}") if not base_container: base_container = determine_base_container(source_repo) # First build the base container. - container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug, - force_rebuild, extra_build_args) + container_build_env = build_containers.make_container_build_env( + dev_root_path, container_build_dir, debug, force_rebuild, extra_build_args + ) if verbose: logger.log(f"Building base container: {base_container}") @@ -85,12 +101,13 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t if verbose: logger.log(f"Base container {base_container} build finished.") - # Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir. + # Now build the target webapp. We use the same build script, + # but with a different Dockerfile and work dir. 
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true" container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo) - container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir, - base_container.replace("/", "-"), - "Dockerfile.webapp") + container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join( + container_build_dir, base_container.replace("/", "-"), "Dockerfile.webapp" + ) if not tag: webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1] tag = f"cerc/{webapp_name}:local" diff --git a/stack_orchestrator/build/fetch_containers.py b/stack_orchestrator/build/fetch_containers.py index bc4b93a7..e0f31dd0 100644 --- a/stack_orchestrator/build/fetch_containers.py +++ b/stack_orchestrator/build/fetch_containers.py @@ -52,7 +52,8 @@ def _local_tag_for(container: str): # See: https://docker-docs.uclv.cu/registry/spec/api/ # Emulate this: -# $ curl -u "my-username:my-token" -X GET "https:///v2/cerc-io/cerc/test-container/tags/list" +# $ curl -u "my-username:my-token" -X GET \ +# "https:///v2/cerc-io/cerc/test-container/tags/list" # {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]} def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]: # registry looks like: git.vdb.to/cerc-io @@ -60,7 +61,9 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list" if opts.o.debug: print(f"Fetching tags from: {url}") - response = requests.get(url, auth=(registry_info.registry_username, registry_info.registry_token)) + response = requests.get( + url, auth=(registry_info.registry_username, registry_info.registry_token) + ) if response.status_code == 200: tag_info = response.json() if opts.o.debug: @@ -68,7 +71,10 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List tags_array = tag_info["tags"] return tags_array else: - error_exit(f"failed to fetch tags from image registry, status code: {response.status_code}") + error_exit( + f"failed to fetch tags from image registry, " + f"status code: {response.status_code}" + ) def _find_latest(candidate_tags: List[str]): @@ -79,9 +85,9 @@ def _find_latest(candidate_tags: List[str]): return sorted_candidates[-1] -def _filter_for_platform(container: str, - registry_info: RegistryInfo, - tag_list: List[str]) -> List[str] : +def _filter_for_platform( + container: str, registry_info: RegistryInfo, tag_list: List[str] +) -> List[str]: filtered_tags = [] this_machine = platform.machine() # Translate between Python and docker platform names @@ -98,7 +104,7 @@ def _filter_for_platform(container: str, manifest = manifest_cmd.inspect_verbose(remote_tag) if opts.o.debug: print(f"manifest: {manifest}") - image_architecture = manifest["Descriptor"]["platform"]["architecture"] + image_architecture = manifest["Descriptor"]["platform"]["architecture"] if opts.o.debug: print(f"image_architecture: {image_architecture}") if this_machine == image_architecture: @@ -137,21 +143,44 @@ def _add_local_tag(remote_tag: str, registry: str, local_tag: str): @click.command() -@click.option('--include', help="only fetch these containers") -@click.option('--exclude', help="don\'t fetch these containers") -@click.option("--force-local-overwrite", is_flag=True, default=False, help="Overwrite a locally built image, if present") -@click.option("--image-registry", required=True, help="Specify the image registry to fetch from") 
-@click.option("--registry-username", required=True, help="Specify the image registry username") -@click.option("--registry-token", required=True, help="Specify the image registry access token") +@click.option("--include", help="only fetch these containers") +@click.option("--exclude", help="don't fetch these containers") +@click.option( + "--force-local-overwrite", + is_flag=True, + default=False, + help="Overwrite a locally built image, if present", +) +@click.option( + "--image-registry", required=True, help="Specify the image registry to fetch from" +) +@click.option( + "--registry-username", required=True, help="Specify the image registry username" +) +@click.option( + "--registry-token", required=True, help="Specify the image registry access token" +) @click.pass_context -def command(ctx, include, exclude, force_local_overwrite, image_registry, registry_username, registry_token): - '''EXPERIMENTAL: fetch the images for a stack from remote registry''' +def command( + ctx, + include, + exclude, + force_local_overwrite, + image_registry, + registry_username, + registry_token, +): + """EXPERIMENTAL: fetch the images for a stack from remote registry""" registry_info = RegistryInfo(image_registry, registry_username, registry_token) docker = DockerClient() if not opts.o.quiet: print("Logging into container registry:") - docker.login(registry_info.registry, registry_info.registry_username, registry_info.registry_token) + docker.login( + registry_info.registry, + registry_info.registry_username, + registry_info.registry_token, + ) # Generate list of target containers stack = ctx.obj.stack containers_in_scope = get_containers_in_scope(stack) @@ -172,19 +201,24 @@ def command(ctx, include, exclude, force_local_overwrite, image_registry, regist print(f"Fetching: {image_to_fetch}") _fetch_image(image_to_fetch, registry_info) # Now check if the target container already exists exists locally already - if (_exists_locally(container)): + if _exists_locally(container): if not opts.o.quiet: print(f"Container image {container} already exists locally") # if so, fail unless the user specified force-local-overwrite - if (force_local_overwrite): + if force_local_overwrite: # In that case remove the existing :local tag if not opts.o.quiet: - print(f"Warning: overwriting local tag from this image: {container} because " - "--force-local-overwrite was specified") + print( + f"Warning: overwriting local tag from this image: " + f"{container} because --force-local-overwrite was specified" + ) else: if not opts.o.quiet: - print(f"Skipping local tagging for this image: {container} because that would " - "overwrite an existing :local tagged image, use --force-local-overwrite to do so.") + print( + f"Skipping local tagging for this image: {container} " + "because that would overwrite an existing :local tagged " + "image, use --force-local-overwrite to do so." 
+ ) continue # Tag the fetched image with the :local tag _add_local_tag(image_to_fetch, image_registry, local_tag) @@ -192,4 +226,7 @@ def command(ctx, include, exclude, force_local_overwrite, image_registry, regist if opts.o.verbose: print(f"Excluding: {container}") if not all_containers_found: - print("Warning: couldn't find usable images for one or more containers, this stack will not deploy") + print( + "Warning: couldn't find usable images for one or more containers, " + "this stack will not deploy" + ) diff --git a/stack_orchestrator/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py b/stack_orchestrator/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py index 86a90180..9c4bd78e 100755 --- a/stack_orchestrator/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py +++ b/stack_orchestrator/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py @@ -12,7 +12,10 @@ from fabric import Connection def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_name): - command = f"pg_dump -h {db_host} -p {db_port} -U {db_user} -d {db_name} -c --inserts -f {file_name}" + command = ( + f"pg_dump -h {db_host} -p {db_port} -U {db_user} " + f"-d {db_name} -c --inserts -f {file_name}" + ) my_env = os.environ.copy() my_env["PGPASSWORD"] = db_password print(f"Exporting from {db_host}:{db_port}/{db_name} to {file_name}... ", end="") diff --git a/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py index 1e6d10f4..4e74e1df 100644 --- a/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py +++ b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py @@ -11,6 +11,8 @@ if len(sys.argv) > 1: with open(testnet_config_path) as stream: data = yaml.safe_load(stream) -for key, value in data['el_premine'].items(): - acct = w3.eth.account.from_mnemonic(data['mnemonic'], account_path=key, passphrase='') +for key, value in data["el_premine"].items(): + acct = w3.eth.account.from_mnemonic( + data["mnemonic"], account_path=key, passphrase="" + ) print("%s,%s,%s" % (key, acct.address, acct.key.hex())) diff --git a/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py b/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py index fa757cf5..a11a1d01 100644 --- a/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py +++ b/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py @@ -18,21 +18,26 @@ from ruamel.yaml import YAML def create(context: DeploymentContext, extra_args): - # Slightly modify the base fixturenet-eth compose file to replace the startup script for fixturenet-eth-geth-1 - # We need to start geth with the flag to allow non eip-155 compliant transactions in order to publish the - # deterministic-deployment-proxy contract, which itself is a prereq for Optimism contract deployment - fixturenet_eth_compose_file = context.deployment_dir.joinpath('compose', 'docker-compose-fixturenet-eth.yml') + # Slightly modify the base fixturenet-eth compose file to replace the + # startup script for fixturenet-eth-geth-1 + # We need to start geth with the flag to allow non eip-155 compliant + # transactions in order to publish the + # deterministic-deployment-proxy contract, which itself is a prereq for + # 
Optimism contract deployment + fixturenet_eth_compose_file = context.deployment_dir.joinpath( + "compose", "docker-compose-fixturenet-eth.yml" + ) - with open(fixturenet_eth_compose_file, 'r') as yaml_file: + with open(fixturenet_eth_compose_file, "r") as yaml_file: yaml = YAML() yaml_data = yaml.load(yaml_file) - new_script = '../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh' + new_script = "../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh" - if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']: - yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script) + if new_script not in yaml_data["services"]["fixturenet-eth-geth-1"]["volumes"]: + yaml_data["services"]["fixturenet-eth-geth-1"]["volumes"].append(new_script) - with open(fixturenet_eth_compose_file, 'w') as yaml_file: + with open(fixturenet_eth_compose_file, "w") as yaml_file: yaml = YAML() yaml.dump(yaml_data, yaml_file) diff --git a/stack_orchestrator/data/stacks/mainnet-blast/deploy/commands.py b/stack_orchestrator/data/stacks/mainnet-blast/deploy/commands.py index 00aa6970..6d3b32d4 100644 --- a/stack_orchestrator/data/stacks/mainnet-blast/deploy/commands.py +++ b/stack_orchestrator/data/stacks/mainnet-blast/deploy/commands.py @@ -22,18 +22,24 @@ import yaml def create(context, extra_args): # Our goal here is just to copy the json files for blast yml_path = context.deployment_dir.joinpath("spec.yml") - with open(yml_path, 'r') as file: + with open(yml_path, "r") as file: data = yaml.safe_load(file) - mount_point = data['volumes']['blast-data'] + mount_point = data["volumes"]["blast-data"] if mount_point[0] == "/": deploy_dir = Path(mount_point) else: deploy_dir = context.deployment_dir.joinpath(mount_point) command_context = extra_args[2] - compose_file = [f for f in command_context.cluster_context.compose_files if "mainnet-blast" in f][0] - source_config_file = Path(compose_file).parent.parent.joinpath("config", "mainnet-blast", "genesis.json") + compose_file = [ + f for f in command_context.cluster_context.compose_files if "mainnet-blast" in f + ][0] + source_config_file = Path(compose_file).parent.parent.joinpath( + "config", "mainnet-blast", "genesis.json" + ) copy(source_config_file, deploy_dir) - source_config_file = Path(compose_file).parent.parent.joinpath("config", "mainnet-blast", "rollup.json") + source_config_file = Path(compose_file).parent.parent.joinpath( + "config", "mainnet-blast", "rollup.json" + ) copy(source_config_file, deploy_dir) diff --git a/stack_orchestrator/data/stacks/mainnet-eth-plugeth/deploy/commands.py b/stack_orchestrator/data/stacks/mainnet-eth-plugeth/deploy/commands.py index 5aba9547..b7a7e002 100644 --- a/stack_orchestrator/data/stacks/mainnet-eth-plugeth/deploy/commands.py +++ b/stack_orchestrator/data/stacks/mainnet-eth-plugeth/deploy/commands.py @@ -27,6 +27,8 @@ def setup(ctx): def create(ctx, extra_args): # Generate the JWT secret and save to its config file secret = token_hex(32) - jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_plugeth_config_data", "jwtsecret") - with open(jwt_file_path, 'w+') as jwt_file: + jwt_file_path = ctx.deployment_dir.joinpath( + "data", "mainnet_eth_plugeth_config_data", "jwtsecret" + ) + with open(jwt_file_path, "w+") as jwt_file: jwt_file.write(secret) diff --git a/stack_orchestrator/data/stacks/mainnet-eth/deploy/commands.py b/stack_orchestrator/data/stacks/mainnet-eth/deploy/commands.py index 9fcecbcf..545e16a1 100644 --- 
a/stack_orchestrator/data/stacks/mainnet-eth/deploy/commands.py +++ b/stack_orchestrator/data/stacks/mainnet-eth/deploy/commands.py @@ -27,6 +27,8 @@ def setup(ctx): def create(ctx, extra_args): # Generate the JWT secret and save to its config file secret = token_hex(32) - jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_config_data", "jwtsecret") - with open(jwt_file_path, 'w+') as jwt_file: + jwt_file_path = ctx.deployment_dir.joinpath( + "data", "mainnet_eth_config_data", "jwtsecret" + ) + with open(jwt_file_path, "w+") as jwt_file: jwt_file.write(secret) diff --git a/stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py b/stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py index b3ce32d3..f1b07620 100644 --- a/stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py +++ b/stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py @@ -14,7 +14,10 @@ # along with this program. If not, see . from stack_orchestrator.util import get_yaml -from stack_orchestrator.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand +from stack_orchestrator.deploy.deploy_types import ( + DeployCommandContext, + LaconicStackSetupCommand, +) from stack_orchestrator.deploy.deployment_context import DeploymentContext from stack_orchestrator.deploy.stack_state import State from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command @@ -75,7 +78,12 @@ def _copy_gentx_files(network_dir: Path, gentx_file_list: str): gentx_files = _comma_delimited_to_list(gentx_file_list) for gentx_file in gentx_files: gentx_file_path = Path(gentx_file) - copyfile(gentx_file_path, os.path.join(network_dir, "config", "gentx", os.path.basename(gentx_file_path))) + copyfile( + gentx_file_path, + os.path.join( + network_dir, "config", "gentx", os.path.basename(gentx_file_path) + ), + ) def _remove_persistent_peers(network_dir: Path): @@ -86,8 +94,13 @@ def _remove_persistent_peers(network_dir: Path): with open(config_file_path, "r") as input_file: config_file_content = input_file.read() persistent_peers_pattern = '^persistent_peers = "(.+?)"' - replace_with = "persistent_peers = \"\"" - config_file_content = re.sub(persistent_peers_pattern, replace_with, config_file_content, flags=re.MULTILINE) + replace_with = 'persistent_peers = ""' + config_file_content = re.sub( + persistent_peers_pattern, + replace_with, + config_file_content, + flags=re.MULTILINE, + ) with open(config_file_path, "w") as output_file: output_file.write(config_file_content) @@ -100,8 +113,13 @@ def _insert_persistent_peers(config_dir: Path, new_persistent_peers: str): with open(config_file_path, "r") as input_file: config_file_content = input_file.read() persistent_peers_pattern = r'^persistent_peers = ""' - replace_with = f"persistent_peers = \"{new_persistent_peers}\"" - config_file_content = re.sub(persistent_peers_pattern, replace_with, config_file_content, flags=re.MULTILINE) + replace_with = f'persistent_peers = "{new_persistent_peers}"' + config_file_content = re.sub( + persistent_peers_pattern, + replace_with, + config_file_content, + flags=re.MULTILINE, + ) with open(config_file_path, "w") as output_file: output_file.write(config_file_content) @@ -113,9 +131,11 @@ def _enable_cors(config_dir: Path): sys.exit(1) with open(config_file_path, "r") as input_file: config_file_content = input_file.read() - cors_pattern = r'^cors_allowed_origins = \[]' + cors_pattern = r"^cors_allowed_origins = \[]" replace_with = 'cors_allowed_origins = ["*"]' - 
config_file_content = re.sub(cors_pattern, replace_with, config_file_content, flags=re.MULTILINE) + config_file_content = re.sub( + cors_pattern, replace_with, config_file_content, flags=re.MULTILINE + ) with open(config_file_path, "w") as output_file: output_file.write(config_file_content) app_file_path = config_dir.joinpath("app.toml") @@ -124,9 +144,11 @@ def _enable_cors(config_dir: Path): sys.exit(1) with open(app_file_path, "r") as input_file: app_file_content = input_file.read() - cors_pattern = r'^enabled-unsafe-cors = false' + cors_pattern = r"^enabled-unsafe-cors = false" replace_with = "enabled-unsafe-cors = true" - app_file_content = re.sub(cors_pattern, replace_with, app_file_content, flags=re.MULTILINE) + app_file_content = re.sub( + cors_pattern, replace_with, app_file_content, flags=re.MULTILINE + ) with open(app_file_path, "w") as output_file: output_file.write(app_file_content) @@ -141,7 +163,9 @@ def _set_listen_address(config_dir: Path): existing_pattern = r'^laddr = "tcp://127.0.0.1:26657"' replace_with = 'laddr = "tcp://0.0.0.0:26657"' print(f"Replacing in: {config_file_path}") - config_file_content = re.sub(existing_pattern, replace_with, config_file_content, flags=re.MULTILINE) + config_file_content = re.sub( + existing_pattern, replace_with, config_file_content, flags=re.MULTILINE + ) with open(config_file_path, "w") as output_file: output_file.write(config_file_content) app_file_path = config_dir.joinpath("app.toml") @@ -152,10 +176,14 @@ def _set_listen_address(config_dir: Path): app_file_content = input_file.read() existing_pattern1 = r'^address = "tcp://localhost:1317"' replace_with1 = 'address = "tcp://0.0.0.0:1317"' - app_file_content = re.sub(existing_pattern1, replace_with1, app_file_content, flags=re.MULTILINE) + app_file_content = re.sub( + existing_pattern1, replace_with1, app_file_content, flags=re.MULTILINE + ) existing_pattern2 = r'^address = "localhost:9090"' replace_with2 = 'address = "0.0.0.0:9090"' - app_file_content = re.sub(existing_pattern2, replace_with2, app_file_content, flags=re.MULTILINE) + app_file_content = re.sub( + existing_pattern2, replace_with2, app_file_content, flags=re.MULTILINE + ) with open(app_file_path, "w") as output_file: output_file.write(app_file_content) @@ -164,7 +192,10 @@ def _phase_from_params(parameters): phase = SetupPhase.ILLEGAL if parameters.initialize_network: if parameters.join_network or parameters.create_network: - print("Can't supply --join-network or --create-network with --initialize-network") + print( + "Can't supply --join-network or --create-network " + "with --initialize-network" + ) sys.exit(1) if not parameters.chain_id: print("--chain-id is required") @@ -176,24 +207,36 @@ def _phase_from_params(parameters): phase = SetupPhase.INITIALIZE elif parameters.join_network: if parameters.initialize_network or parameters.create_network: - print("Can't supply --initialize-network or --create-network with --join-network") + print( + "Can't supply --initialize-network or --create-network " + "with --join-network" + ) sys.exit(1) phase = SetupPhase.JOIN elif parameters.create_network: if parameters.initialize_network or parameters.join_network: - print("Can't supply --initialize-network or --join-network with --create-network") + print( + "Can't supply --initialize-network or --join-network " + "with --create-network" + ) sys.exit(1) phase = SetupPhase.CREATE elif parameters.connect_network: if parameters.initialize_network or parameters.join_network: - print("Can't supply --initialize-network or 
--join-network with --connect-network") + print( + "Can't supply --initialize-network or --join-network " + "with --connect-network" + ) sys.exit(1) phase = SetupPhase.CONNECT return phase -def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCommand, extra_args): - +def setup( + command_context: DeployCommandContext, + parameters: LaconicStackSetupCommand, + extra_args, +): options = opts.o currency = "alnt" # Does this need to be a parameter? @@ -205,12 +248,9 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo network_dir = Path(parameters.network_dir).absolute() laconicd_home_path_in_container = "/laconicd-home" - mounts = [ - VolumeMapping(network_dir, laconicd_home_path_in_container) - ] + mounts = [VolumeMapping(network_dir, laconicd_home_path_in_container)] if phase == SetupPhase.INITIALIZE: - # We want to create the directory so if it exists that's an error if os.path.exists(network_dir): print(f"Error: network directory {network_dir} already exists") @@ -220,13 +260,18 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo output, status = run_container_command( command_context, - "laconicd", f"laconicd init {parameters.node_moniker} --home {laconicd_home_path_in_container}\ - --chain-id {parameters.chain_id} --default-denom {currency}", mounts) + "laconicd", + f"laconicd init {parameters.node_moniker} " + f"--home {laconicd_home_path_in_container} " + f"--chain-id {parameters.chain_id} --default-denom {currency}", + mounts, + ) if options.debug: print(f"Command output: {output}") elif phase == SetupPhase.JOIN: - # In the join phase (alternative to connect) we are participating in a genesis ceremony for the chain + # In the join phase (alternative to connect) we are participating in a + # genesis ceremony for the chain if not os.path.exists(network_dir): print(f"Error: network directory {network_dir} doesn't exist") sys.exit(1) @@ -234,52 +279,72 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo chain_id = _get_chain_id_from_config(network_dir) output1, status1 = run_container_command( - command_context, "laconicd", f"laconicd keys add {parameters.key_name} --home {laconicd_home_path_in_container}\ - --keyring-backend test", mounts) + command_context, + "laconicd", + f"laconicd keys add {parameters.key_name} " + f"--home {laconicd_home_path_in_container} --keyring-backend test", + mounts, + ) if options.debug: print(f"Command output: {output1}") output2, status2 = run_container_command( command_context, "laconicd", - f"laconicd genesis add-genesis-account {parameters.key_name} 12900000000000000000000{currency}\ - --home {laconicd_home_path_in_container} --keyring-backend test", - mounts) + f"laconicd genesis add-genesis-account {parameters.key_name} " + f"12900000000000000000000{currency} " + f"--home {laconicd_home_path_in_container} --keyring-backend test", + mounts, + ) if options.debug: print(f"Command output: {output2}") output3, status3 = run_container_command( command_context, "laconicd", - f"laconicd genesis gentx {parameters.key_name} 90000000000{currency} --home {laconicd_home_path_in_container}\ - --chain-id {chain_id} --keyring-backend test", - mounts) + f"laconicd genesis gentx {parameters.key_name} " + f"90000000000{currency} --home {laconicd_home_path_in_container} " + f"--chain-id {chain_id} --keyring-backend test", + mounts, + ) if options.debug: print(f"Command output: {output3}") output4, status4 = run_container_command( command_context, 
"laconicd", - f"laconicd keys show {parameters.key_name} -a --home {laconicd_home_path_in_container} --keyring-backend test", - mounts) + f"laconicd keys show {parameters.key_name} -a " + f"--home {laconicd_home_path_in_container} --keyring-backend test", + mounts, + ) print(f"Node account address: {output4}") elif phase == SetupPhase.CONNECT: - # In the connect phase (named to not conflict with join) we are making a node that syncs a chain with existing genesis.json - # but not with validator role. We need this kind of node in order to bootstrap it into a validator after it syncs + # In the connect phase (named to not conflict with join) we are + # making a node that syncs a chain with existing genesis.json + # but not with validator role. We need this kind of node in order to + # bootstrap it into a validator after it syncs output1, status1 = run_container_command( - command_context, "laconicd", f"laconicd keys add {parameters.key_name} --home {laconicd_home_path_in_container}\ - --keyring-backend test", mounts) + command_context, + "laconicd", + f"laconicd keys add {parameters.key_name} " + f"--home {laconicd_home_path_in_container} --keyring-backend test", + mounts, + ) if options.debug: print(f"Command output: {output1}") output2, status2 = run_container_command( command_context, "laconicd", - f"laconicd keys show {parameters.key_name} -a --home {laconicd_home_path_in_container} --keyring-backend test", - mounts) + f"laconicd keys show {parameters.key_name} -a " + f"--home {laconicd_home_path_in_container} --keyring-backend test", + mounts, + ) print(f"Node account address: {output2}") output3, status3 = run_container_command( command_context, "laconicd", - f"laconicd cometbft show-validator --home {laconicd_home_path_in_container}", - mounts) + f"laconicd cometbft show-validator " + f"--home {laconicd_home_path_in_container}", + mounts, + ) print(f"Node validator address: {output3}") elif phase == SetupPhase.CREATE: @@ -287,42 +352,73 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo print(f"Error: network directory {network_dir} doesn't exist") sys.exit(1) - # In the CREATE phase, we are either a "coordinator" node, generating the genesis.json file ourselves - # OR we are a "not-coordinator" node, consuming a genesis file we got from the coordinator node. + # In the CREATE phase, we are either a "coordinator" node, + # generating the genesis.json file ourselves + # OR we are a "not-coordinator" node, consuming a genesis file from + # the coordinator node. if parameters.genesis_file: # We got the genesis file from elsewhere # Copy it into our network dir genesis_file_path = Path(parameters.genesis_file) if not os.path.exists(genesis_file_path): - print(f"Error: supplied genesis file: {parameters.genesis_file} does not exist.") + print( + f"Error: supplied genesis file: {parameters.genesis_file} " + "does not exist." + ) sys.exit(1) - copyfile(genesis_file_path, os.path.join(network_dir, "config", os.path.basename(genesis_file_path))) + copyfile( + genesis_file_path, + os.path.join( + network_dir, "config", os.path.basename(genesis_file_path) + ), + ) else: # We're generating the genesis file # First look in the supplied gentx files for the other nodes' keys - other_node_keys = _get_node_keys_from_gentx_files(parameters.gentx_address_list) + other_node_keys = _get_node_keys_from_gentx_files( + parameters.gentx_address_list + ) # Add those keys to our genesis, with balances we determine here (why?) 
for other_node_key in other_node_keys: outputk, statusk = run_container_command( - command_context, "laconicd", f"laconicd genesis add-genesis-account {other_node_key} \ - 12900000000000000000000{currency}\ - --home {laconicd_home_path_in_container} --keyring-backend test", mounts) + command_context, + "laconicd", + f"laconicd genesis add-genesis-account {other_node_key} " + f"12900000000000000000000{currency} " + f"--home {laconicd_home_path_in_container} " + "--keyring-backend test", + mounts, + ) if options.debug: print(f"Command output: {outputk}") # Copy the gentx json files into our network dir _copy_gentx_files(network_dir, parameters.gentx_file_list) # Now we can run collect-gentxs output1, status1 = run_container_command( - command_context, "laconicd", f"laconicd genesis collect-gentxs --home {laconicd_home_path_in_container}", mounts) + command_context, + "laconicd", + f"laconicd genesis collect-gentxs " + f"--home {laconicd_home_path_in_container}", + mounts, + ) if options.debug: print(f"Command output: {output1}") - print(f"Generated genesis file, please copy to other nodes as required: \ - {os.path.join(network_dir, 'config', 'genesis.json')}") - # Last thing, collect-gentxs puts a likely bogus set of persistent_peers in config.toml so we remove that now + genesis_path = os.path.join(network_dir, "config", "genesis.json") + print( + f"Generated genesis file, please copy to other nodes " + f"as required: {genesis_path}" + ) + # Last thing, collect-gentxs puts a likely bogus set of persistent_peers + # in config.toml so we remove that now _remove_persistent_peers(network_dir) # In both cases we validate the genesis file now output2, status1 = run_container_command( - command_context, "laconicd", f"laconicd genesis validate-genesis --home {laconicd_home_path_in_container}", mounts) + command_context, + "laconicd", + f"laconicd genesis validate-genesis " + f"--home {laconicd_home_path_in_container}", + mounts, + ) print(f"validate-genesis result: {output2}") else: @@ -341,15 +437,23 @@ def create(deployment_context: DeploymentContext, extra_args): sys.exit(1) config_dir_path = network_dir_path.joinpath("config") if not (config_dir_path.exists() and config_dir_path.is_dir()): - print(f"Error: supplied network directory does not contain a config directory: {config_dir_path}") + print( + f"Error: supplied network directory does not contain " + f"a config directory: {config_dir_path}" + ) sys.exit(1) data_dir_path = network_dir_path.joinpath("data") if not (data_dir_path.exists() and data_dir_path.is_dir()): - print(f"Error: supplied network directory does not contain a data directory: {data_dir_path}") + print( + f"Error: supplied network directory does not contain " + f"a data directory: {data_dir_path}" + ) sys.exit(1) # Copy the network directory contents into our deployment # TODO: change this to work with non local paths - deployment_config_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-config") + deployment_config_dir = deployment_context.deployment_dir.joinpath( + "data", "laconicd-config" + ) copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True) # If supplied, add the initial persistent peers to the config file if extra_args[1]: @@ -360,7 +464,9 @@ def create(deployment_context: DeploymentContext, extra_args): _set_listen_address(deployment_config_dir) # Copy the data directory contents into our deployment # TODO: change this to work with non local paths - deployment_data_dir = deployment_context.deployment_dir.joinpath("data", 
"laconicd-data") + deployment_data_dir = deployment_context.deployment_dir.joinpath( + "data", "laconicd-data" + ) copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True) diff --git a/stack_orchestrator/data/stacks/test/deploy/commands.py b/stack_orchestrator/data/stacks/test/deploy/commands.py index e6601eae..69436213 100644 --- a/stack_orchestrator/data/stacks/test/deploy/commands.py +++ b/stack_orchestrator/data/stacks/test/deploy/commands.py @@ -24,16 +24,20 @@ default_spec_file_content = """config: """ -# Output a known string to a know file in the bind mounted directory ./container-output-dir +# Output a known string to a know file in the bind mounted directory +# ./container-output-dir # for test purposes -- test checks that the file was written. def setup(command_context: DeployCommandContext, parameters, extra_args): host_directory = "./container-output-dir" host_directory_absolute = Path(extra_args[0]).absolute().joinpath(host_directory) host_directory_absolute.mkdir(parents=True, exist_ok=True) - mounts = [ - VolumeMapping(host_directory_absolute, "/data") - ] - output, status = run_container_command(command_context, "test", "echo output-data > /data/output-file && echo success", mounts) + mounts = [VolumeMapping(host_directory_absolute, "/data")] + output, status = run_container_command( + command_context, + "test", + "echo output-data > /data/output-file && echo success", + mounts, + ) def init(command_context: DeployCommandContext): @@ -44,7 +48,7 @@ def init(command_context: DeployCommandContext): def create(command_context: DeployCommandContext, extra_args): data = "create-command-output-data" output_file_path = command_context.deployment_dir.joinpath("create-file") - with open(output_file_path, 'w+') as output_file: + with open(output_file_path, "w+") as output_file: output_file.write(data) diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py index d14ee9ca..0c7a9e48 100644 --- a/stack_orchestrator/deploy/compose/deploy_docker.py +++ b/stack_orchestrator/deploy/compose/deploy_docker.py @@ -15,7 +15,11 @@ from pathlib import Path from python_on_whales import DockerClient, DockerException -from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator +from stack_orchestrator.deploy.deployer import ( + Deployer, + DeployerException, + DeployerConfigGenerator, +) from stack_orchestrator.deploy.deployment_context import DeploymentContext from stack_orchestrator.opts import opts @@ -24,9 +28,19 @@ class DockerDeployer(Deployer): name: str = "compose" type: str - def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None: - self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name, - compose_env_file=compose_env_file) + def __init__( + self, + type, + deployment_context: DeploymentContext, + compose_files, + compose_project_name, + compose_env_file, + ) -> None: + self.docker = DockerClient( + compose_files=compose_files, + compose_project_name=compose_project_name, + compose_env_file=compose_env_file, + ) self.type = type def up(self, detach, skip_cluster_management, services): @@ -68,29 +82,54 @@ class DockerDeployer(Deployer): def port(self, service, private_port): if not opts.o.dry_run: try: - return self.docker.compose.port(service=service, private_port=private_port) + return self.docker.compose.port( + service=service, private_port=private_port + ) except 
DockerException as e: raise DeployerException(e) def execute(self, service, command, tty, envs): if not opts.o.dry_run: try: - return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs) + return self.docker.compose.execute( + service=service, command=command, tty=tty, envs=envs + ) except DockerException as e: raise DeployerException(e) def logs(self, services, tail, follow, stream): if not opts.o.dry_run: try: - return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream) + return self.docker.compose.logs( + services=services, tail=tail, follow=follow, stream=stream + ) except DockerException as e: raise DeployerException(e) - def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False): + def run( + self, + image: str, + command=None, + user=None, + volumes=None, + entrypoint=None, + env={}, + ports=[], + detach=False, + ): if not opts.o.dry_run: try: - return self.docker.run(image=image, command=command, user=user, volumes=volumes, - entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0) + return self.docker.run( + image=image, + command=command, + user=user, + volumes=volumes, + entrypoint=entrypoint, + envs=env, + detach=detach, + publish=ports, + publish_all=len(ports) == 0, + ) except DockerException as e: raise DeployerException(e) @@ -106,20 +145,25 @@ class DockerDeployer(Deployer): # Deployment directory is parent of compose directory compose_dir = Path(self.docker.compose_files[0]).parent deployment_dir = compose_dir.parent - job_compose_file = deployment_dir / "compose-jobs" / f"docker-compose-{job_name}.yml" + job_compose_file = ( + deployment_dir / "compose-jobs" / f"docker-compose-{job_name}.yml" + ) if not job_compose_file.exists(): - raise DeployerException(f"Job compose file not found: {job_compose_file}") + raise DeployerException( + f"Job compose file not found: {job_compose_file}" + ) if opts.o.verbose: print(f"Running job from: {job_compose_file}") - # Create a DockerClient for the job compose file with same project name and env file + # Create a DockerClient for the job compose file with same + # project name and env file # This allows the job to access volumes from the main deployment job_docker = DockerClient( compose_files=[job_compose_file], compose_project_name=self.docker.compose_project_name, - compose_env_file=self.docker.compose_env_file + compose_env_file=self.docker.compose_env_file, ) # Run the job with --rm flag to remove container after completion @@ -130,7 +174,6 @@ class DockerDeployer(Deployer): class DockerDeployerConfigGenerator(DeployerConfigGenerator): - def __init__(self, type: str) -> None: super().__init__() diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py index 6f3ed83d..bae5a76b 100644 --- a/stack_orchestrator/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -47,20 +47,23 @@ from stack_orchestrator.deploy.k8s import k8s_command @click.group() @click.option("--include", help="only start these components") -@click.option("--exclude", help="don\'t start these components") +@click.option("--exclude", help="don't start these components") @click.option("--env-file", help="env file to be used") @click.option("--cluster", help="specify a non-default cluster name") -@click.option("--deploy-to", help="cluster system to deploy to (compose or k8s or k8s-kind)") +@click.option( + "--deploy-to", help="cluster system to deploy to (compose or k8s or 
k8s-kind)" +) @click.pass_context def command(ctx, include, exclude, env_file, cluster, deploy_to): - '''deploy a stack''' + """deploy a stack""" # k8s subcommand doesn't require a stack if ctx.invoked_subcommand == "k8s": return - # Although in theory for some subcommands (e.g. deploy create) the stack can be inferred, - # Click doesn't allow us to know that here, so we make providing the stack mandatory + # Although in theory for some subcommands (e.g. deploy create) the stack + # can be inferred, Click doesn't allow us to know that here, so we make + # providing the stack mandatory stack = global_options2(ctx).stack if not stack: print("Error: --stack option is required") @@ -73,19 +76,29 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to): deploy_to = "compose" stack = get_stack_path(stack) - ctx.obj = create_deploy_context(global_options2(ctx), None, stack, include, exclude, cluster, env_file, deploy_to) - # Subcommand is executed now, by the magic of click - - -def create_deploy_context( - global_context, - deployment_context: DeploymentContext, + ctx.obj = create_deploy_context( + global_options2(ctx), + None, stack, include, exclude, cluster, env_file, - deploy_to) -> DeployCommandContext: + deploy_to, + ) + # Subcommand is executed now, by the magic of click + + +def create_deploy_context( + global_context, + deployment_context: DeploymentContext, + stack, + include, + exclude, + cluster, + env_file, + deploy_to, +) -> DeployCommandContext: # Extract the cluster name from the deployment, if we have one if deployment_context and cluster is None: cluster = deployment_context.get_cluster_id() @@ -101,17 +114,27 @@ def create_deploy_context( # For helm chart deployments, skip compose file loading if is_helm_chart_deployment: - cluster_context = ClusterContext(global_context, cluster, [], [], [], None, env_file) + cluster_context = ClusterContext( + global_context, cluster, [], [], [], None, env_file + ) else: - cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file) + cluster_context = _make_cluster_context( + global_context, stack, include, exclude, cluster, env_file + ) - deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files, - compose_project_name=cluster_context.cluster, - compose_env_file=cluster_context.env_file) + deployer = getDeployer( + deploy_to, + deployment_context, + compose_files=cluster_context.compose_files, + compose_project_name=cluster_context.cluster, + compose_env_file=cluster_context.env_file, + ) return DeployCommandContext(stack, cluster_context, deployer) -def up_operation(ctx, services_list, stay_attached=False, skip_cluster_management=False): +def up_operation( + ctx, services_list, stay_attached=False, skip_cluster_management=False +): global_context = ctx.parent.parent.obj deploy_context = ctx.obj cluster_context = deploy_context.cluster_context @@ -119,21 +142,38 @@ def up_operation(ctx, services_list, stay_attached=False, skip_cluster_managemen for attr, value in container_exec_env.items(): os.environ[attr] = value if global_context.verbose: - print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}") + print( + f"Running compose up with container_exec_env: {container_exec_env}, " + f"extra_args: {services_list}" + ) for pre_start_command in cluster_context.pre_start_commands: _run_command(global_context, cluster_context.cluster, pre_start_command) - deploy_context.deployer.up(detach=not stay_attached, 
skip_cluster_management=skip_cluster_management, services=services_list) + deploy_context.deployer.up( + detach=not stay_attached, + skip_cluster_management=skip_cluster_management, + services=services_list, + ) for post_start_command in cluster_context.post_start_commands: _run_command(global_context, cluster_context.cluster, post_start_command) - _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env) + _orchestrate_cluster_config( + global_context, + cluster_context.config, + deploy_context.deployer, + container_exec_env, + ) def down_operation(ctx, delete_volumes, extra_args_list, skip_cluster_management=False): timeout_arg = None if extra_args_list: timeout_arg = extra_args_list[0] - # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully - ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes, skip_cluster_management=skip_cluster_management) + # Specify shutdown timeout (default 10s) to give services enough time to + # shutdown gracefully + ctx.obj.deployer.down( + timeout=timeout_arg, + volumes=delete_volumes, + skip_cluster_management=skip_cluster_management, + ) def status_operation(ctx): @@ -160,7 +200,11 @@ def ps_operation(ctx): if mapping is None: print(f"{port_mapping}", end="") else: - print(f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}->{port_mapping}", end="") + print( + f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}" + f"->{port_mapping}", + end="", + ) comma = ", " print() else: @@ -195,7 +239,9 @@ def exec_operation(ctx, extra_args): if global_context.verbose: print(f"Running compose exec {service_name} {command_to_exec}") try: - ctx.obj.deployer.execute(service_name, command_to_exec, envs=container_exec_env, tty=True) + ctx.obj.deployer.execute( + service_name, command_to_exec, envs=container_exec_env, tty=True + ) except DeployerException: print("container command returned error exit status") @@ -203,7 +249,9 @@ def exec_operation(ctx, extra_args): def logs_operation(ctx, tail: int, follow: bool, extra_args: str): extra_args_list = list(extra_args) or None services_list = extra_args_list if extra_args_list is not None else [] - logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True) + logs_stream = ctx.obj.deployer.logs( + services=services_list, tail=tail, follow=follow, stream=True + ) for stream_type, stream_content in logs_stream: print(stream_content.decode("utf-8"), end="") @@ -220,7 +268,7 @@ def run_job_operation(ctx, job_name: str, helm_release: str = None): @command.command() -@click.argument('extra_args', nargs=-1) # help: command: up +@click.argument("extra_args", nargs=-1) # help: command: up @click.pass_context def up(ctx, extra_args): extra_args_list = list(extra_args) or None @@ -228,8 +276,10 @@ def up(ctx, extra_args): @command.command() -@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes") -@click.argument('extra_args', nargs=-1) # help: command: down +@click.option( + "--delete-volumes/--preserve-volumes", default=False, help="delete data volumes" +) +@click.argument("extra_args", nargs=-1) # help: command: down @click.pass_context def down(ctx, delete_volumes, extra_args): extra_args_list = list(extra_args) or None @@ -243,14 +293,14 @@ def ps(ctx): @command.command() -@click.argument('extra_args', nargs=-1) # help: command: port +@click.argument("extra_args", nargs=-1) # help: command: port @click.pass_context def port(ctx, extra_args): 
port_operation(ctx, extra_args) @command.command() -@click.argument('extra_args', nargs=-1) # help: command: exec +@click.argument("extra_args", nargs=-1) # help: command: exec @click.pass_context def exec(ctx, extra_args): exec_operation(ctx, extra_args) @@ -259,19 +309,21 @@ def exec(ctx, extra_args): @command.command() @click.option("--tail", "-n", default=None, help="number of lines to display") @click.option("--follow", "-f", is_flag=True, default=False, help="follow log output") -@click.argument('extra_args', nargs=-1) # help: command: logs +@click.argument("extra_args", nargs=-1) # help: command: logs @click.pass_context def logs(ctx, tail, follow, extra_args): logs_operation(ctx, tail, follow, extra_args) def get_stack_status(ctx, stack): - ctx_copy = copy.copy(ctx) ctx_copy.stack = stack cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None) - deployer = Deployer(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster) + deployer = Deployer( + compose_files=cluster_context.compose_files, + compose_project_name=cluster_context.cluster, + ) # TODO: refactor to avoid duplicating this code above if ctx.verbose: print("Running compose ps") @@ -289,14 +341,15 @@ def get_stack_status(ctx, stack): def _make_runtime_env(ctx): container_exec_env = { "CERC_HOST_UID": f"{os.getuid()}", - "CERC_HOST_GID": f"{os.getgid()}" + "CERC_HOST_GID": f"{os.getgid()}", } container_exec_env.update({"CERC_SCRIPT_DEBUG": "true"} if ctx.debug else {}) return container_exec_env def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude): - # Create default unique, stable cluster name from confile file path and stack name if provided + # Create default unique, stable cluster name from confile file path and + # stack name if provided if deployment: path = os.path.realpath(os.path.abspath(compose_dir)) else: @@ -311,7 +364,8 @@ def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude) return cluster -# stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack +# stack has to be either PathLike pointing to a stack yml file, or a +# string with the name of a known stack def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): dev_root_path = get_dev_root_path(ctx) @@ -320,16 +374,22 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): if deployment: compose_dir = stack.joinpath("compose") else: - # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure - compose_dir = Path(__file__).absolute().parent.parent.joinpath("data", "compose") + # See: + # https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure + compose_dir = ( + Path(__file__).absolute().parent.parent.joinpath("data", "compose") + ) if cluster is None: - cluster = _make_default_cluster_name(deployment, compose_dir, stack, include, exclude) + cluster = _make_default_cluster_name( + deployment, compose_dir, stack, include, exclude + ) else: _make_default_cluster_name(deployment, compose_dir, stack, include, exclude) # See: https://stackoverflow.com/a/20885799/1701505 from stack_orchestrator import data + with resources.open_text(data, "pod-list.txt") as pod_list_file: all_pods = pod_list_file.read().splitlines() @@ -337,8 +397,8 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): if stack: stack_config = get_parsed_stack_config(stack) # TODO: syntax check the input here 
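For reference, the pods entry read just below can appear in either the legacy string form or the newer dict form; the _convert_to_new_format helper further down in this file normalizes the former into the latter. A minimal sketch with hypothetical pod names:

    # Sketch only: hypothetical pod names
    legacy_pods = ["fixturenet-laconicd", "foundry"]
    normalized_pods = [
        {"name": p, "repository": "internal", "path": p} for p in legacy_pods
    ]
    # normalized_pods[0] ==
    #   {"name": "fixturenet-laconicd", "repository": "internal",
    #    "path": "fixturenet-laconicd"}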
- pods_in_scope = stack_config['pods'] - cluster_config = stack_config['config'] if 'config' in stack_config else None + pods_in_scope = stack_config["pods"] + cluster_config = stack_config["config"] if "config" in stack_config else None else: pods_in_scope = all_pods cluster_config = None @@ -361,29 +421,47 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): if include_exclude_check(pod_name, include, exclude): if pod_repository is None or pod_repository == "internal": if deployment: - compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml") + compose_file_name = os.path.join( + compose_dir, f"docker-compose-{pod_path}.yml" + ) else: compose_file_name = resolve_compose_file(stack, pod_name) else: if deployment: - compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml") + compose_file_name = os.path.join( + compose_dir, f"docker-compose-{pod_name}.yml" + ) pod_pre_start_command = pod.get("pre_start_command") pod_post_start_command = pod.get("post_start_command") - script_dir = compose_dir.parent.joinpath("pods", pod_name, "scripts") + script_dir = compose_dir.parent.joinpath( + "pods", pod_name, "scripts" + ) if pod_pre_start_command is not None: - pre_start_commands.append(os.path.join(script_dir, pod_pre_start_command)) + pre_start_commands.append( + os.path.join(script_dir, pod_pre_start_command) + ) if pod_post_start_command is not None: - post_start_commands.append(os.path.join(script_dir, pod_post_start_command)) + post_start_commands.append( + os.path.join(script_dir, pod_post_start_command) + ) else: # TODO: fix this code for external stack with scripts - pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"]) - compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml") + pod_root_dir = os.path.join( + dev_root_path, pod_repository.split("/")[-1], pod["path"] + ) + compose_file_name = os.path.join( + pod_root_dir, f"docker-compose-{pod_name}.yml" + ) pod_pre_start_command = pod.get("pre_start_command") pod_post_start_command = pod.get("post_start_command") if pod_pre_start_command is not None: - pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command)) + pre_start_commands.append( + os.path.join(pod_root_dir, pod_pre_start_command) + ) if pod_post_start_command is not None: - post_start_commands.append(os.path.join(pod_root_dir, pod_post_start_command)) + post_start_commands.append( + os.path.join(pod_root_dir, pod_post_start_command) + ) compose_files.append(compose_file_name) else: if ctx.verbose: @@ -392,7 +470,15 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): if ctx.verbose: print(f"files: {compose_files}") - return ClusterContext(ctx, cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file) + return ClusterContext( + ctx, + cluster, + compose_files, + pre_start_commands, + post_start_commands, + cluster_config, + env_file, + ) def _convert_to_new_format(old_pod_array): @@ -401,11 +487,7 @@ def _convert_to_new_format(old_pod_array): if isinstance(old_pod, dict): new_pod_array.append(old_pod) else: - new_pod = { - "name": old_pod, - "repository": "internal", - "path": old_pod - } + new_pod = {"name": old_pod, "repository": "internal", "path": old_pod} new_pod_array.append(new_pod) return new_pod_array @@ -419,14 +501,15 @@ def _run_command(ctx, cluster_name, command): command_env["CERC_SO_COMPOSE_PROJECT"] = cluster_name if ctx.debug: command_env["CERC_SCRIPT_DEBUG"] = 
"true" - command_result = subprocess.run(command_file, shell=True, env=command_env, cwd=command_dir) + command_result = subprocess.run( + command_file, shell=True, env=command_env, cwd=command_dir + ) if command_result.returncode != 0: print(f"FATAL Error running command: {command}") sys.exit(1) def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env): - @dataclass class ConfigDirective: source_container: str @@ -444,24 +527,32 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en container_config[directive].split(".")[0], container_config[directive].split(".")[1], container, - directive + directive, ) if ctx.verbose: - print(f"Setting {pd.destination_container}.{pd.destination_variable}" - f" = {pd.source_container}.{pd.source_variable}") + print( + f"Setting {pd.destination_container}.{pd.destination_variable}" + f" = {pd.source_container}.{pd.source_variable}" + ) # TODO: add a timeout waiting_for_data = True destination_output = "*** no output received yet ***" while waiting_for_data: - # TODO: fix the script paths so they're consistent between containers + # TODO: fix the script paths so they're consistent between + # containers source_value = None try: - source_value = deployer.execute(pd.source_container, - ["sh", "-c", - "sh /docker-entrypoint-scripts.d/export-" - f"{pd.source_variable}.sh"], - tty=False, - envs=container_exec_env) + source_value = deployer.execute( + pd.source_container, + [ + "sh", + "-c", + "sh /docker-entrypoint-scripts.d/export-" + f"{pd.source_variable}.sh", + ], + tty=False, + envs=container_exec_env, + ) except DeployerException as error: if ctx.debug: print(f"Docker exception reading config source: {error}") @@ -469,20 +560,28 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en # "It returned with code 1" if "It returned with code 1" in str(error): if ctx.verbose: - print("Config export script returned an error, re-trying") - # If the script failed to execute (e.g. the file is not there) then we get: + print( + "Config export script returned an error, re-trying" + ) + # If the script failed to execute + # (e.g. 
the file is not there) then we get: # "It returned with code 2" if "It returned with code 2" in str(error): print(f"Fatal error reading config source: {error}") if source_value: if ctx.debug: print(f"fetched source value: {source_value}") - destination_output = deployer.execute(pd.destination_container, - ["sh", "-c", - f"sh /scripts/import-{pd.destination_variable}.sh" - f" {source_value}"], - tty=False, - envs=container_exec_env) + destination_output = deployer.execute( + pd.destination_container, + [ + "sh", + "-c", + f"sh /scripts/import-{pd.destination_variable}.sh" + f" {source_value}", + ], + tty=False, + envs=container_exec_env, + ) waiting_for_data = False if ctx.debug and not waiting_for_data: print(f"destination output: {destination_output}") diff --git a/stack_orchestrator/deploy/deploy_types.py b/stack_orchestrator/deploy/deploy_types.py index f59d9f67..bdea68f5 100644 --- a/stack_orchestrator/deploy/deploy_types.py +++ b/stack_orchestrator/deploy/deploy_types.py @@ -21,7 +21,8 @@ from stack_orchestrator.deploy.deployer import Deployer @dataclass class ClusterContext: - options: CommandOptions # TODO: this should be in its own object not stuffed in here + # TODO: this should be in its own object not stuffed in here + options: CommandOptions cluster: str compose_files: List[str] pre_start_commands: List[str] diff --git a/stack_orchestrator/deploy/deploy_util.py b/stack_orchestrator/deploy/deploy_util.py index 9e204baa..84019069 100644 --- a/stack_orchestrator/deploy/deploy_util.py +++ b/stack_orchestrator/deploy/deploy_util.py @@ -15,7 +15,12 @@ from typing import List, Any from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping -from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_pod_list, resolve_compose_file +from stack_orchestrator.util import ( + get_parsed_stack_config, + get_yaml, + get_pod_list, + resolve_compose_file, +) from stack_orchestrator.opts import opts @@ -38,7 +43,7 @@ def _container_image_from_service(stack: str, service: str): def parsed_pod_files_map_from_file_names(pod_files): - parsed_pod_yaml_map : Any = {} + parsed_pod_yaml_map: Any = {} for pod_file in pod_files: with open(pod_file, "r") as pod_file_descriptor: parsed_pod_file = get_yaml().load(pod_file_descriptor) @@ -73,7 +78,9 @@ def _volumes_to_docker(mounts: List[VolumeMapping]): return result -def run_container_command(ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping]): +def run_container_command( + ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping] +): deployer = ctx.deployer container_image = _container_image_from_service(ctx.stack, service) docker_volumes = _volumes_to_docker(mounts) @@ -81,11 +88,14 @@ def run_container_command(ctx: DeployCommandContext, service: str, command: str, print(f"Running this command in {service} container: {command}") docker_output = deployer.run( container_image, - ["-c", command], entrypoint="sh", - # Current laconicd container has a bug where it crashes when run not as root - # Commented out line below is a workaround. Created files end up owned by root on the host + ["-c", command], + entrypoint="sh", + # Current laconicd container has a bug where it crashes when run not + # as root + # Commented out line below is a workaround. 
Created files end up + # owned by root on the host # user=f"{os.getuid()}:{os.getgid()}", - volumes=docker_volumes - ) + volumes=docker_volumes, + ) # There doesn't seem to be a way to get an exit code from docker.run() return (docker_output, 0) diff --git a/stack_orchestrator/deploy/deployer.py b/stack_orchestrator/deploy/deployer.py index 766833bf..68bf24b2 100644 --- a/stack_orchestrator/deploy/deployer.py +++ b/stack_orchestrator/deploy/deployer.py @@ -18,7 +18,6 @@ from pathlib import Path class Deployer(ABC): - @abstractmethod def up(self, detach, skip_cluster_management, services): pass @@ -52,7 +51,17 @@ class Deployer(ABC): pass @abstractmethod - def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False): + def run( + self, + image: str, + command=None, + user=None, + volumes=None, + entrypoint=None, + env={}, + ports=[], + detach=False, + ): pass @abstractmethod @@ -66,7 +75,6 @@ class DeployerException(Exception): class DeployerConfigGenerator(ABC): - @abstractmethod def generate(self, deployment_dir: Path): pass diff --git a/stack_orchestrator/deploy/deployer_factory.py b/stack_orchestrator/deploy/deployer_factory.py index 2d01729e..1de14cc5 100644 --- a/stack_orchestrator/deploy/deployer_factory.py +++ b/stack_orchestrator/deploy/deployer_factory.py @@ -14,8 +14,14 @@ # along with this program. If not, see . from stack_orchestrator import constants -from stack_orchestrator.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerConfigGenerator -from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator +from stack_orchestrator.deploy.k8s.deploy_k8s import ( + K8sDeployer, + K8sDeployerConfigGenerator, +) +from stack_orchestrator.deploy.compose.deploy_docker import ( + DockerDeployer, + DockerDeployerConfigGenerator, +) def getDeployerConfigGenerator(type: str, deployment_context): @@ -27,10 +33,27 @@ def getDeployerConfigGenerator(type: str, deployment_context): print(f"ERROR: deploy-to {type} is not valid") -def getDeployer(type: str, deployment_context, compose_files, compose_project_name, compose_env_file): +def getDeployer( + type: str, deployment_context, compose_files, compose_project_name, compose_env_file +): if type == "compose" or type is None: - return DockerDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file) - elif type == type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type: - return K8sDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file) + return DockerDeployer( + type, + deployment_context, + compose_files, + compose_project_name, + compose_env_file, + ) + elif ( + type == type == constants.k8s_deploy_type + or type == constants.k8s_kind_deploy_type + ): + return K8sDeployer( + type, + deployment_context, + compose_files, + compose_project_name, + compose_env_file, + ) else: print(f"ERROR: deploy-to {type} is not valid") diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py index 196b3301..35abea3c 100644 --- a/stack_orchestrator/deploy/deployment.py +++ b/stack_orchestrator/deploy/deployment.py @@ -18,8 +18,19 @@ from pathlib import Path import sys from stack_orchestrator import constants from stack_orchestrator.deploy.images import push_images_operation -from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation, status_operation -from stack_orchestrator.deploy.deploy import 
exec_operation, logs_operation, create_deploy_context, update_operation +from stack_orchestrator.deploy.deploy import ( + up_operation, + down_operation, + ps_operation, + port_operation, + status_operation, +) +from stack_orchestrator.deploy.deploy import ( + exec_operation, + logs_operation, + create_deploy_context, + update_operation, +) from stack_orchestrator.deploy.deploy_types import DeployCommandContext from stack_orchestrator.deploy.deployment_context import DeploymentContext @@ -28,7 +39,7 @@ from stack_orchestrator.deploy.deployment_context import DeploymentContext @click.option("--dir", required=True, help="path to deployment directory") @click.pass_context def command(ctx, dir): - '''manage a deployment''' + """manage a deployment""" # Check that --stack wasn't supplied if ctx.parent.obj.stack: @@ -40,7 +51,10 @@ def command(ctx, dir): print(f"Error: deployment directory {dir} does not exist") sys.exit(1) if not dir_path.is_dir(): - print(f"Error: supplied deployment directory path {dir} exists but is a file not a directory") + print( + f"Error: supplied deployment directory path {dir} exists but is a " + "file not a directory" + ) sys.exit(1) # Store the deployment context for subcommands deployment_context = DeploymentContext() @@ -57,16 +71,31 @@ def make_deploy_context(ctx) -> DeployCommandContext: else: deployment_type = constants.compose_deploy_type stack = context.deployment_dir - return create_deploy_context(ctx.parent.parent.obj, context, stack, None, None, - cluster_name, env_file, deployment_type) + return create_deploy_context( + ctx.parent.parent.obj, + context, + stack, + None, + None, + cluster_name, + env_file, + deployment_type, + ) # TODO: remove legacy up command since it's an alias for start @command.command() -@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout") -@click.option("--skip-cluster-management/--perform-cluster-management", - default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)") -@click.argument('extra_args', nargs=-1) # help: command: up +@click.option( + "--stay-attached/--detatch-terminal", + default=False, + help="detatch or not to see container stdout", +) +@click.option( + "--skip-cluster-management/--perform-cluster-management", + default=False, + help="Skip cluster initialization/tear-down (only for kind-k8s deployments)", +) +@click.argument("extra_args", nargs=-1) # help: command: up @click.pass_context def up(ctx, stay_attached, skip_cluster_management, extra_args): ctx.obj = make_deploy_context(ctx) @@ -76,10 +105,17 @@ def up(ctx, stay_attached, skip_cluster_management, extra_args): # start is the preferred alias for up @command.command() -@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout") -@click.option("--skip-cluster-management/--perform-cluster-management", - default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)") -@click.argument('extra_args', nargs=-1) # help: command: up +@click.option( + "--stay-attached/--detatch-terminal", + default=False, + help="detatch or not to see container stdout", +) +@click.option( + "--skip-cluster-management/--perform-cluster-management", + default=False, + help="Skip cluster initialization/tear-down (only for kind-k8s deployments)", +) +@click.argument("extra_args", nargs=-1) # help: command: up @click.pass_context def start(ctx, stay_attached, skip_cluster_management, extra_args): ctx.obj = 
make_deploy_context(ctx) @@ -89,10 +125,15 @@ def start(ctx, stay_attached, skip_cluster_management, extra_args): # TODO: remove legacy up command since it's an alias for stop @command.command() -@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes") -@click.option("--skip-cluster-management/--perform-cluster-management", - default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)") -@click.argument('extra_args', nargs=-1) # help: command: down +@click.option( + "--delete-volumes/--preserve-volumes", default=False, help="delete data volumes" +) +@click.option( + "--skip-cluster-management/--perform-cluster-management", + default=False, + help="Skip cluster initialization/tear-down (only for kind-k8s deployments)", +) +@click.argument("extra_args", nargs=-1) # help: command: down @click.pass_context def down(ctx, delete_volumes, skip_cluster_management, extra_args): # Get the stack config file name @@ -103,10 +144,15 @@ def down(ctx, delete_volumes, skip_cluster_management, extra_args): # stop is the preferred alias for down @command.command() -@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes") -@click.option("--skip-cluster-management/--perform-cluster-management", - default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)") -@click.argument('extra_args', nargs=-1) # help: command: down +@click.option( + "--delete-volumes/--preserve-volumes", default=False, help="delete data volumes" +) +@click.option( + "--skip-cluster-management/--perform-cluster-management", + default=False, + help="Skip cluster initialization/tear-down (only for kind-k8s deployments)", +) +@click.argument("extra_args", nargs=-1) # help: command: down @click.pass_context def stop(ctx, delete_volumes, skip_cluster_management, extra_args): # TODO: add cluster name and env file here @@ -130,7 +176,7 @@ def push_images(ctx): @command.command() -@click.argument('extra_args', nargs=-1) # help: command: port +@click.argument("extra_args", nargs=-1) # help: command: port @click.pass_context def port(ctx, extra_args): ctx.obj = make_deploy_context(ctx) @@ -138,7 +184,7 @@ def port(ctx, extra_args): @command.command() -@click.argument('extra_args', nargs=-1) # help: command: exec +@click.argument("extra_args", nargs=-1) # help: command: exec @click.pass_context def exec(ctx, extra_args): ctx.obj = make_deploy_context(ctx) @@ -148,7 +194,7 @@ def exec(ctx, extra_args): @command.command() @click.option("--tail", "-n", default=None, help="number of lines to display") @click.option("--follow", "-f", is_flag=True, default=False, help="follow log output") -@click.argument('extra_args', nargs=-1) # help: command: logs +@click.argument("extra_args", nargs=-1) # help: command: logs @click.pass_context def logs(ctx, tail, follow, extra_args): ctx.obj = make_deploy_context(ctx) @@ -170,11 +216,15 @@ def update(ctx): @command.command() -@click.argument('job_name') -@click.option('--helm-release', help='Helm release name (only for k8s helm chart deployments, defaults to chart name)') +@click.argument("job_name") +@click.option( + "--helm-release", + help="Helm release name (for k8s helm chart deployments, defaults to chart name)", +) @click.pass_context def run_job(ctx, job_name, helm_release): - '''run a one-time job from the stack''' + """run a one-time job from the stack""" from stack_orchestrator.deploy.deploy import run_job_operation + ctx.obj = make_deploy_context(ctx) 
run_job_operation(ctx, job_name, helm_release) diff --git a/stack_orchestrator/deploy/deployment_context.py b/stack_orchestrator/deploy/deployment_context.py index 239e9c5c..7f588774 100644 --- a/stack_orchestrator/deploy/deployment_context.py +++ b/stack_orchestrator/deploy/deployment_context.py @@ -1,4 +1,3 @@ - # Copyright © 2022, 2023 Vulcanize # This program is free software: you can redistribute it and/or modify diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py index 7afcb40d..514e035d 100644 --- a/stack_orchestrator/deploy/deployment_create.py +++ b/stack_orchestrator/deploy/deployment_create.py @@ -24,10 +24,23 @@ from secrets import token_hex import sys from stack_orchestrator import constants from stack_orchestrator.opts import opts -from stack_orchestrator.util import (get_stack_path, get_parsed_deployment_spec, get_parsed_stack_config, - global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts, - get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file, - resolve_config_dir, get_job_list, get_job_file_path) +from stack_orchestrator.util import ( + get_stack_path, + get_parsed_deployment_spec, + get_parsed_stack_config, + global_options, + get_yaml, + get_pod_list, + get_pod_file_path, + pod_has_scripts, + get_pod_script_paths, + get_plugin_code_paths, + error_exit, + env_var_map_from_file, + resolve_config_dir, + get_job_list, + get_job_file_path, +) from stack_orchestrator.deploy.spec import Spec from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator @@ -49,17 +62,15 @@ def _get_ports(stack): if "services" in parsed_pod_file: for svc_name, svc in parsed_pod_file["services"].items(): if "ports" in svc: - # Ports can appear as strings or numbers. We normalize them as strings. + # Ports can appear as strings or numbers. We normalize them as + # strings. 
ports[svc_name] = [str(x) for x in svc["ports"]] return ports def _get_named_volumes(stack): # Parse the compose files looking for named volumes - named_volumes = { - "rw": [], - "ro": [] - } + named_volumes = {"rw": [], "ro": []} parsed_stack = get_parsed_stack_config(stack) pods = get_pod_list(parsed_stack) yaml = get_yaml() @@ -75,7 +86,7 @@ def _get_named_volumes(stack): ret[svc_name] = { "volume": parts[0], "mount": parts[1], - "options": parts[2] if len(parts) == 3 else None + "options": parts[2] if len(parts) == 3 else None, } return ret @@ -88,7 +99,10 @@ def _get_named_volumes(stack): for vu in find_vol_usage(parsed_pod_file, volume).values(): read_only = vu["options"] == "ro" if read_only: - if vu["volume"] not in named_volumes["rw"] and vu["volume"] not in named_volumes["ro"]: + if ( + vu["volume"] not in named_volumes["rw"] + and vu["volume"] not in named_volumes["ro"] + ): named_volumes["ro"].append(vu["volume"]) else: if vu["volume"] not in named_volumes["rw"]: @@ -108,10 +122,13 @@ def _create_bind_dir_if_relative(volume, path_string, compose_dir): absolute_path.mkdir(parents=True, exist_ok=True) else: if not path.exists(): - print(f"WARNING: mount path for volume {volume} does not exist: {path_string}") + print( + f"WARNING: mount path for volume {volume} does not exist: {path_string}" + ) -# See: https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml +# See: +# https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml def _fixup_pod_file(pod, spec, compose_dir): deployment_type = spec[constants.deploy_to_key] # Fix up volumes @@ -123,7 +140,11 @@ def _fixup_pod_file(pod, spec, compose_dir): if volume in spec_volumes: volume_spec = spec_volumes[volume] if volume_spec: - volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}" + volume_spec_fixedup = ( + volume_spec + if Path(volume_spec).is_absolute() + else f".{volume_spec}" + ) _create_bind_dir_if_relative(volume, volume_spec, compose_dir) # this is Docker specific if spec.is_docker_deployment(): @@ -132,8 +153,8 @@ def _fixup_pod_file(pod, spec, compose_dir): "driver_opts": { "type": "none", "device": volume_spec_fixedup, - "o": "bind" - } + "o": "bind", + }, } pod["volumes"][volume] = new_volume_spec @@ -189,12 +210,17 @@ def call_stack_deploy_init(deploy_command_context): init_done = True else: # TODO: remove this restriction - print(f"Skipping init() from plugin {python_file_path}. Only one init() is allowed.") + print( + f"Skipping init() from plugin {python_file_path}. " + "Only one init() is allowed." 
+ ) return ret # TODO: fold this with function above -def call_stack_deploy_setup(deploy_command_context, parameters: LaconicStackSetupCommand, extra_args): +def call_stack_deploy_setup( + deploy_command_context, parameters: LaconicStackSetupCommand, extra_args +): # Link with the python file in the stack # Call a function in it # If no function found, return None @@ -247,7 +273,13 @@ def _find_extra_config_dirs(parsed_pod_file, pod): def _get_mapped_ports(stack: str, map_recipe: str): - port_map_recipes = ["any-variable-random", "localhost-same", "any-same", "localhost-fixed-random", "any-fixed-random"] + port_map_recipes = [ + "any-variable-random", + "localhost-same", + "any-same", + "localhost-fixed-random", + "any-fixed-random", + ] ports = _get_ports(stack) if ports: # Implement any requested mapping recipe @@ -259,7 +291,9 @@ def _get_mapped_ports(stack: str, map_recipe: str): orig_port = ports_array[x] # Strip /udp suffix if present bare_orig_port = orig_port.replace("/udp", "") - random_port = random.randint(20000, 50000) # Beware: we're relying on luck to not collide + random_port = random.randint( + 20000, 50000 + ) # Beware: we're relying on luck to not collide if map_recipe == "any-variable-random": # This is the default so take no action pass @@ -278,7 +312,10 @@ def _get_mapped_ports(stack: str, map_recipe: str): else: print("Error: bad map_recipe") else: - print(f"Error: --map-ports-to-host must specify one of: {port_map_recipes}") + print( + f"Error: --map-ports-to-host must specify one of: " + f"{port_map_recipes}" + ) sys.exit(1) return ports @@ -303,33 +340,54 @@ def _parse_config_variables(variable_values: str): @click.command() @click.option("--config", help="Provide config variables for the deployment") -@click.option("--config-file", help="Provide config variables in a file for the deployment") +@click.option( + "--config-file", help="Provide config variables in a file for the deployment" +) @click.option("--kube-config", help="Provide a config file for a k8s deployment") -@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster") +@click.option( + "--image-registry", + help="Provide a container image registry url for this k8s cluster", +) @click.option("--output", required=True, help="Write yaml spec file here") -@click.option("--map-ports-to-host", required=False, - help="Map ports to the host as one of: any-variable-random (default), " - "localhost-same, any-same, localhost-fixed-random, any-fixed-random") +@click.option( + "--map-ports-to-host", + required=False, + help="Map ports to the host as one of: any-variable-random (default), " + "localhost-same, any-same, localhost-fixed-random, any-fixed-random", +) @click.pass_context -def init(ctx, config, config_file, kube_config, image_registry, output, map_ports_to_host): +def init( + ctx, config, config_file, kube_config, image_registry, output, map_ports_to_host +): stack = global_options(ctx).stack deployer_type = ctx.obj.deployer.type deploy_command_context = ctx.obj return init_operation( deploy_command_context, - stack, deployer_type, - config, config_file, + stack, + deployer_type, + config, + config_file, kube_config, image_registry, output, - map_ports_to_host) + map_ports_to_host, + ) # The init command's implementation is in a separate function so that we can # call it from other commands, bypassing the click decoration stuff -def init_operation(deploy_command_context, stack, deployer_type, config, - config_file, kube_config, image_registry, output, 
map_ports_to_host): - +def init_operation( + deploy_command_context, + stack, + deployer_type, + config, + config_file, + kube_config, + image_registry, + output, + map_ports_to_host, +): default_spec_file_content = call_stack_deploy_init(deploy_command_context) spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type} if deployer_type == "k8s": @@ -340,13 +398,20 @@ def init_operation(deploy_command_context, stack, deployer_type, config, if image_registry: spec_file_content.update({constants.image_registry_key: image_registry}) else: - print("WARNING: --image-registry not specified, only default container registries (eg, Docker Hub) will be available") + print( + "WARNING: --image-registry not specified, only default container " + "registries (eg, Docker Hub) will be available" + ) else: # Check for --kube-config supplied for non-relevant deployer types if kube_config is not None: - error_exit(f"--kube-config is not allowed with a {deployer_type} deployment") + error_exit( + f"--kube-config is not allowed with a {deployer_type} deployment" + ) if image_registry is not None: - error_exit(f"--image-registry is not allowed with a {deployer_type} deployment") + error_exit( + f"--image-registry is not allowed with a {deployer_type} deployment" + ) if default_spec_file_content: spec_file_content.update(default_spec_file_content) config_variables = _parse_config_variables(config) @@ -395,7 +460,9 @@ def init_operation(deploy_command_context, stack, deployer_type, config, spec_file_content["configmaps"] = configmap_descriptors if opts.o.debug: - print(f"Creating spec file for stack: {stack} with content: {spec_file_content}") + print( + f"Creating spec file for stack: {stack} with content: {spec_file_content}" + ) with open(output, "w") as output_file: get_yaml().dump(spec_file_content, output_file) @@ -443,22 +510,45 @@ def _check_volume_definitions(spec): @click.command() -@click.option("--spec-file", required=True, help="Spec file to use to create this deployment") +@click.option( + "--spec-file", required=True, help="Spec file to use to create this deployment" +) @click.option("--deployment-dir", help="Create deployment files in this directory") -@click.option("--helm-chart", is_flag=True, default=False, help="Generate Helm chart instead of deploying (k8s only)") +@click.option( + "--helm-chart", + is_flag=True, + default=False, + help="Generate Helm chart instead of deploying (k8s only)", +) # TODO: Hack @click.option("--network-dir", help="Network configuration supplied in this directory") @click.option("--initial-peers", help="Initial set of persistent peers") @click.pass_context def create(ctx, spec_file, deployment_dir, helm_chart, network_dir, initial_peers): deployment_command_context = ctx.obj - return create_operation(deployment_command_context, spec_file, deployment_dir, helm_chart, network_dir, initial_peers) + return create_operation( + deployment_command_context, + spec_file, + deployment_dir, + helm_chart, + network_dir, + initial_peers, + ) # The init command's implementation is in a separate function so that we can # call it from other commands, bypassing the click decoration stuff -def create_operation(deployment_command_context, spec_file, deployment_dir, helm_chart, network_dir, initial_peers): - parsed_spec = Spec(os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file)) +def create_operation( + deployment_command_context, + spec_file, + deployment_dir, + helm_chart, + network_dir, + initial_peers, +): + parsed_spec = Spec( + 
os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file) + ) _check_volume_definitions(parsed_spec) stack_name = parsed_spec["stack"] deployment_type = parsed_spec[constants.deploy_to_key] @@ -483,17 +573,24 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm # Branch to Helm chart generation flow if --helm-chart flag is set if deployment_type == "k8s" and helm_chart: - from stack_orchestrator.deploy.k8s.helm.chart_generator import generate_helm_chart + from stack_orchestrator.deploy.k8s.helm.chart_generator import ( + generate_helm_chart, + ) + generate_helm_chart(stack_name, spec_file, deployment_dir_path) return # Exit early for helm chart generation # Existing deployment flow continues unchanged # Copy any config varibles from the spec file into an env file suitable for compose - _write_config_file(spec_file, deployment_dir_path.joinpath(constants.config_file_name)) + _write_config_file( + spec_file, deployment_dir_path.joinpath(constants.config_file_name) + ) # Copy any k8s config file into the deployment dir if deployment_type == "k8s": - _write_kube_config_file(Path(parsed_spec[constants.kube_config_key]), - deployment_dir_path.joinpath(constants.kube_config_filename)) + _write_kube_config_file( + Path(parsed_spec[constants.kube_config_key]), + deployment_dir_path.joinpath(constants.kube_config_filename), + ) # Copy the pod files into the deployment dir, fixing up content pods = get_pod_list(parsed_stack) destination_compose_dir = deployment_dir_path.joinpath("compose") @@ -510,7 +607,9 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm if opts.o.debug: print(f"extra config dirs: {extra_config_dirs}") _fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir) - with open(destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w") as output_file: + with open( + destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w" + ) as output_file: yaml.dump(parsed_pod_file, output_file) # Copy the config files for the pod, if any config_dirs = {pod} @@ -518,8 +617,11 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm for config_dir in config_dirs: source_config_dir = resolve_config_dir(stack_name, config_dir) if os.path.exists(source_config_dir): - destination_config_dir = deployment_dir_path.joinpath("config", config_dir) - # If the same config dir appears in multiple pods, it may already have been copied + destination_config_dir = deployment_dir_path.joinpath( + "config", config_dir + ) + # If the same config dir appears in multiple pods, it may already have + # been copied if not os.path.exists(destination_config_dir): copytree(source_config_dir, destination_config_dir) # Copy the script files for the pod, if any @@ -532,8 +634,12 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm for configmap in parsed_spec.get_configmaps(): source_config_dir = resolve_config_dir(stack_name, configmap) if os.path.exists(source_config_dir): - destination_config_dir = deployment_dir_path.joinpath("configmaps", configmap) - copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True) + destination_config_dir = deployment_dir_path.joinpath( + "configmaps", configmap + ) + copytree( + source_config_dir, destination_config_dir, dirs_exist_ok=True + ) else: # TODO: We should probably only do this if the volume is marked :ro. 
for volume_name, volume_path in parsed_spec.get_volumes().items(): @@ -542,8 +648,14 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm if os.path.exists(source_config_dir) and os.listdir(source_config_dir): destination_config_dir = deployment_dir_path.joinpath(volume_path) # Only copy if the destination exists and _is_ empty. - if os.path.exists(destination_config_dir) and not os.listdir(destination_config_dir): - copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True) + if os.path.exists(destination_config_dir) and not os.listdir( + destination_config_dir + ): + copytree( + source_config_dir, + destination_config_dir, + dirs_exist_ok=True, + ) # Copy the job files into the deployment dir (for Docker deployments) jobs = get_job_list(parsed_stack) @@ -555,22 +667,31 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm if job_file_path and job_file_path.exists(): parsed_job_file = yaml.load(open(job_file_path, "r")) _fixup_pod_file(parsed_job_file, parsed_spec, destination_compose_dir) - with open(destination_compose_jobs_dir.joinpath("docker-compose-%s.yml" % job), "w") as output_file: + with open( + destination_compose_jobs_dir.joinpath( + "docker-compose-%s.yml" % job + ), + "w", + ) as output_file: yaml.dump(parsed_job_file, output_file) if opts.o.debug: print(f"Copied job compose file: {job}") # Delegate to the stack's Python code - # The deploy create command doesn't require a --stack argument so we need to insert the - # stack member here. + # The deploy create command doesn't require a --stack argument so we need + # to insert the stack member here. deployment_command_context.stack = stack_name deployment_context = DeploymentContext() deployment_context.init(deployment_dir_path) # Call the deployer to generate any deployer-specific files (e.g. 
for kind) - deployer_config_generator = getDeployerConfigGenerator(deployment_type, deployment_context) + deployer_config_generator = getDeployerConfigGenerator( + deployment_type, deployment_context + ) # TODO: make deployment_dir_path a Path above deployer_config_generator.generate(deployment_dir_path) - call_stack_deploy_create(deployment_context, [network_dir, initial_peers, deployment_command_context]) + call_stack_deploy_create( + deployment_context, [network_dir, initial_peers, deployment_command_context] + ) # TODO: this code should be in the stack .py files but @@ -580,18 +701,50 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm @click.option("--node-moniker", help="Moniker for this node") @click.option("--chain-id", help="The new chain id") @click.option("--key-name", help="Name for new node key") -@click.option("--gentx-files", help="List of comma-delimited gentx filenames from other nodes") -@click.option("--gentx-addresses", type=str, help="List of comma-delimited validator addresses for other nodes") +@click.option( + "--gentx-files", help="List of comma-delimited gentx filenames from other nodes" +) +@click.option( + "--gentx-addresses", + type=str, + help="List of comma-delimited validator addresses for other nodes", +) @click.option("--genesis-file", help="Genesis file for the network") -@click.option("--initialize-network", is_flag=True, default=False, help="Initialize phase") +@click.option( + "--initialize-network", is_flag=True, default=False, help="Initialize phase" +) @click.option("--join-network", is_flag=True, default=False, help="Join phase") @click.option("--connect-network", is_flag=True, default=False, help="Connect phase") @click.option("--create-network", is_flag=True, default=False, help="Create phase") @click.option("--network-dir", help="Directory for network files") -@click.argument('extra_args', nargs=-1) +@click.argument("extra_args", nargs=-1) @click.pass_context -def setup(ctx, node_moniker, chain_id, key_name, gentx_files, gentx_addresses, genesis_file, initialize_network, join_network, - connect_network, create_network, network_dir, extra_args): - parmeters = LaconicStackSetupCommand(chain_id, node_moniker, key_name, initialize_network, join_network, connect_network, - create_network, gentx_files, gentx_addresses, genesis_file, network_dir) +def setup( + ctx, + node_moniker, + chain_id, + key_name, + gentx_files, + gentx_addresses, + genesis_file, + initialize_network, + join_network, + connect_network, + create_network, + network_dir, + extra_args, +): + parmeters = LaconicStackSetupCommand( + chain_id, + node_moniker, + key_name, + initialize_network, + join_network, + connect_network, + create_network, + gentx_files, + gentx_addresses, + genesis_file, + network_dir, + ) call_stack_deploy_setup(ctx.obj, parmeters, extra_args) diff --git a/stack_orchestrator/deploy/images.py b/stack_orchestrator/deploy/images.py index f2af1c09..2c57bf47 100644 --- a/stack_orchestrator/deploy/images.py +++ b/stack_orchestrator/deploy/images.py @@ -32,7 +32,9 @@ def _image_needs_pushed(image: str): def _remote_tag_for_image(image: str, remote_repo_url: str): # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy major_parts = image.split("/", 2) - image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0] + image_name_with_version = ( + major_parts[1] if 2 == len(major_parts) else major_parts[0] + ) (image_name, image_version) = image_name_with_version.split(":") if image_version == 
"local": return f"{remote_repo_url}/{image_name}:deploy" @@ -61,17 +63,22 @@ def add_tags_to_image(remote_repo_url: str, local_tag: str, *additional_tags): docker = DockerClient() remote_tag = _remote_tag_for_image(local_tag, remote_repo_url) - new_remote_tags = [_remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags] + new_remote_tags = [ + _remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags + ] docker.buildx.imagetools.create(sources=[remote_tag], tags=new_remote_tags) def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: str): # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy major_parts = image.split("/", 2) - image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0] + image_name_with_version = ( + major_parts[1] if 2 == len(major_parts) else major_parts[0] + ) (image_name, image_version) = image_name_with_version.split(":") if image_version == "local": - # Salt the tag with part of the deployment id to make it unique to this deployment + # Salt the tag with part of the deployment id to make it unique to this + # deployment deployment_tag = deployment_id[-8:] return f"{remote_repo_url}/{image_name}:deploy-{deployment_tag}" else: @@ -79,7 +86,9 @@ def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: # TODO: needs lots of error handling -def push_images_operation(command_context: DeployCommandContext, deployment_context: DeploymentContext): +def push_images_operation( + command_context: DeployCommandContext, deployment_context: DeploymentContext +): # Get the list of images for the stack cluster_context = command_context.cluster_context images: Set[str] = images_for_deployment(cluster_context.compose_files) @@ -88,14 +97,18 @@ def push_images_operation(command_context: DeployCommandContext, deployment_cont docker = DockerClient() for image in images: if _image_needs_pushed(image): - remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id) + remote_tag = remote_tag_for_image_unique( + image, remote_repo_url, deployment_context.id + ) if opts.o.verbose: print(f"Tagging {image} to {remote_tag}") docker.image.tag(image, remote_tag) # Run docker push commands to upload for image in images: if _image_needs_pushed(image): - remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id) + remote_tag = remote_tag_for_image_unique( + image, remote_repo_url, deployment_context.id + ) if opts.o.verbose: print(f"Pushing image {remote_tag}") docker.image.push(remote_tag) diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py index 7cd4306b..a906c341 100644 --- a/stack_orchestrator/deploy/k8s/cluster_info.py +++ b/stack_orchestrator/deploy/k8s/cluster_info.py @@ -21,22 +21,33 @@ from typing import Any, List, Set from stack_orchestrator.opts import opts from stack_orchestrator.util import env_var_map_from_file -from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files +from stack_orchestrator.deploy.k8s.helpers import ( + named_volumes_from_pod_files, + volume_mounts_for_service, + volumes_for_pod_files, +) from stack_orchestrator.deploy.k8s.helpers import get_kind_pv_bind_mount_path -from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variables_map, envs_from_compose_file, merge_envs -from stack_orchestrator.deploy.deploy_util import 
parsed_pod_files_map_from_file_names, images_for_deployment +from stack_orchestrator.deploy.k8s.helpers import ( + envs_from_environment_variables_map, + envs_from_compose_file, + merge_envs, +) +from stack_orchestrator.deploy.deploy_util import ( + parsed_pod_files_map_from_file_names, + images_for_deployment, +) from stack_orchestrator.deploy.deploy_types import DeployEnvVars from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits from stack_orchestrator.deploy.images import remote_tag_for_image_unique -DEFAULT_VOLUME_RESOURCES = Resources({ - "reservations": {"storage": "2Gi"} -}) +DEFAULT_VOLUME_RESOURCES = Resources({"reservations": {"storage": "2Gi"}}) -DEFAULT_CONTAINER_RESOURCES = Resources({ - "reservations": {"cpus": "1.0", "memory": "2000M"}, - "limits": {"cpus": "4.0", "memory": "8000M"}, -}) +DEFAULT_CONTAINER_RESOURCES = Resources( + { + "reservations": {"cpus": "1.0", "memory": "2000M"}, + "limits": {"cpus": "4.0", "memory": "8000M"}, + } +) def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements: @@ -54,8 +65,7 @@ def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequi return ret return client.V1ResourceRequirements( - requests=to_dict(resources.reservations), - limits=to_dict(resources.limits) + requests=to_dict(resources.reservations), limits=to_dict(resources.limits) ) @@ -73,10 +83,12 @@ class ClusterInfo: self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files) # Find the set of images in the pods self.image_set = images_for_deployment(pod_files) - self.environment_variables = DeployEnvVars(env_var_map_from_file(compose_env_file)) + self.environment_variables = DeployEnvVars( + env_var_map_from_file(compose_env_file) + ) self.app_name = deployment_name self.spec = spec - if (opts.o.debug): + if opts.o.debug: print(f"Env vars: {self.environment_variables.map}") def get_nodeports(self): @@ -90,7 +102,8 @@ class ClusterInfo: for raw_port in [str(p) for p in service_info["ports"]]: if opts.o.debug: print(f"service port: {raw_port}") - # Parse protocol suffix (e.g., "8001/udp" -> port=8001, protocol=UDP) + # Parse protocol suffix (e.g., "8001/udp" -> port=8001, + # protocol=UDP) protocol = "TCP" port_str = raw_port if "/" in raw_port: @@ -106,22 +119,31 @@ class ClusterInfo: node_port = None pod_port = int(port_str) service = client.V1Service( - metadata=client.V1ObjectMeta(name=f"{self.app_name}-nodeport-{pod_port}-{protocol.lower()}"), + metadata=client.V1ObjectMeta( + name=( + f"{self.app_name}-nodeport-" + f"{pod_port}-{protocol.lower()}" + ) + ), spec=client.V1ServiceSpec( type="NodePort", - ports=[client.V1ServicePort( - port=pod_port, - target_port=pod_port, - node_port=node_port, - protocol=protocol - )], - selector={"app": self.app_name} - ) + ports=[ + client.V1ServicePort( + port=pod_port, + target_port=pod_port, + node_port=node_port, + protocol=protocol, + ) + ], + selector={"app": self.app_name}, + ), ) nodeports.append(service) return nodeports - def get_ingress(self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"): + def get_ingress( + self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod" + ): # No ingress for a deployment that has no http-proxy defined, for now http_proxy_info_list = self.spec.get_http_proxy() ingress = None @@ -133,10 +155,20 @@ class ClusterInfo: # TODO: good enough parsing for webapp deployment for now host_name = http_proxy_info["host-name"] rules = [] - tls = [client.V1IngressTLS( - 
hosts=certificate["spec"]["dnsNames"] if certificate else [host_name], - secret_name=certificate["spec"]["secretName"] if certificate else f"{self.app_name}-tls" - )] if use_tls else None + tls = ( + [ + client.V1IngressTLS( + hosts=certificate["spec"]["dnsNames"] + if certificate + else [host_name], + secret_name=certificate["spec"]["secretName"] + if certificate + else f"{self.app_name}-tls", + ) + ] + if use_tls + else None + ) paths = [] for route in http_proxy_info["routes"]: path = route["path"] @@ -145,28 +177,26 @@ class ClusterInfo: print(f"proxy config: {path} -> {proxy_to}") # proxy_to has the form <service>:<port> proxy_to_port = int(proxy_to.split(":")[1]) - paths.append(client.V1HTTPIngressPath( - path_type="Prefix", - path=path, - backend=client.V1IngressBackend( - service=client.V1IngressServiceBackend( - # TODO: this looks wrong - name=f"{self.app_name}-service", - # TODO: pull port number from the service - port=client.V1ServiceBackendPort(number=proxy_to_port) - ) + paths.append( + client.V1HTTPIngressPath( + path_type="Prefix", + path=path, + backend=client.V1IngressBackend( + service=client.V1IngressServiceBackend( + # TODO: this looks wrong + name=f"{self.app_name}-service", + # TODO: pull port number from the service + port=client.V1ServiceBackendPort(number=proxy_to_port), + ) + ), ) - )) - rules.append(client.V1IngressRule( - host=host_name, - http=client.V1HTTPIngressRuleValue( - paths=paths ) - )) - spec = client.V1IngressSpec( - tls=tls, - rules=rules + rules.append( + client.V1IngressRule( + host=host_name, http=client.V1HTTPIngressRuleValue(paths=paths) + ) ) + spec = client.V1IngressSpec(tls=tls, rules=rules) ingress_annotations = { "kubernetes.io/ingress.class": "nginx", @@ -176,10 +206,9 @@ class ClusterInfo: ingress = client.V1Ingress( metadata=client.V1ObjectMeta( - name=f"{self.app_name}-ingress", - annotations=ingress_annotations + name=f"{self.app_name}-ingress", annotations=ingress_annotations ), - spec=spec + spec=spec, ) return ingress @@ -198,12 +227,9 @@ class ClusterInfo: metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"), spec=client.V1ServiceSpec( type="ClusterIP", - ports=[client.V1ServicePort( - port=port, - target_port=port - )], - selector={"app": self.app_name} - ) + ports=[client.V1ServicePort(port=port, target_port=port)], + selector={"app": self.app_name}, + ), ) return service @@ -226,7 +252,7 @@ class ClusterInfo: labels = { "app": self.app_name, - "volume-label": f"{self.app_name}-{volume_name}" + "volume-label": f"{self.app_name}-{volume_name}", } if volume_path: storage_class_name = "manual" @@ -240,11 +266,13 @@ class ClusterInfo: access_modes=["ReadWriteOnce"], storage_class_name=storage_class_name, resources=to_k8s_resource_requirements(resources), - volume_name=k8s_volume_name + volume_name=k8s_volume_name, ) pvc = client.V1PersistentVolumeClaim( - metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}", labels=labels), - spec=spec + metadata=client.V1ObjectMeta( + name=f"{self.app_name}-{volume_name}", labels=labels + ), + spec=spec, ) result.append(pvc) return result @@ -260,20 +288,27 @@ class ClusterInfo: continue if not cfg_map_path.startswith("/"): - cfg_map_path = os.path.join(os.path.dirname(self.spec.file_path), cfg_map_path) + cfg_map_path = os.path.join( + os.path.dirname(self.spec.file_path), cfg_map_path + ) - # Read in all the files at a single-level of the directory. 
This mimics the behavior - # of `kubectl create configmap foo --from-file=/path/to/dir` + # Read in all the files at a single-level of the directory. + # This mimics the behavior of + # `kubectl create configmap foo --from-file=/path/to/dir` data = {} for f in os.listdir(cfg_map_path): full_path = os.path.join(cfg_map_path, f) if os.path.isfile(full_path): - data[f] = base64.b64encode(open(full_path, 'rb').read()).decode('ASCII') + data[f] = base64.b64encode(open(full_path, "rb").read()).decode( + "ASCII" + ) spec = client.V1ConfigMap( - metadata=client.V1ObjectMeta(name=f"{self.app_name}-{cfg_map_name}", - labels={"configmap-label": cfg_map_name}), - binary_data=data + metadata=client.V1ObjectMeta( + name=f"{self.app_name}-{cfg_map_name}", + labels={"configmap-label": cfg_map_name}, + ), + binary_data=data, ) result.append(spec) return result @@ -287,10 +322,14 @@ class ClusterInfo: resources = DEFAULT_VOLUME_RESOURCES for volume_name, volume_path in spec_volumes.items(): # We only need to create a volume if it is fully qualified HostPath. - # Otherwise, we create the PVC and expect the node to allocate the volume for us. + # Otherwise, we create the PVC and expect the node to allocate the volume + # for us. if not volume_path: if opts.o.debug: - print(f"{volume_name} does not require an explicit PersistentVolume, since it is not a bind-mount.") + print( + f"{volume_name} does not require an explicit " + "PersistentVolume, since it is not a bind-mount." + ) continue if volume_name not in named_volumes: @@ -299,22 +338,29 @@ class ClusterInfo: continue if not os.path.isabs(volume_path): - print(f"WARNING: {volume_name}:{volume_path} is not absolute, cannot bind volume.") + print( + f"WARNING: {volume_name}:{volume_path} is not absolute, " + "cannot bind volume." 
+ ) continue if self.spec.is_kind_deployment(): - host_path = client.V1HostPathVolumeSource(path=get_kind_pv_bind_mount_path(volume_name)) + host_path = client.V1HostPathVolumeSource( + path=get_kind_pv_bind_mount_path(volume_name) + ) else: host_path = client.V1HostPathVolumeSource(path=volume_path) spec = client.V1PersistentVolumeSpec( storage_class_name="manual", access_modes=["ReadWriteOnce"], capacity=to_k8s_resource_requirements(resources).requests, - host_path=host_path + host_path=host_path, ) pv = client.V1PersistentVolume( - metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}", - labels={"volume-label": f"{self.app_name}-{volume_name}"}), + metadata=client.V1ObjectMeta( + name=f"{self.app_name}-{volume_name}", + labels={"volume-label": f"{self.app_name}-{volume_name}"}, + ), spec=spec, ) result.append(pv) @@ -336,7 +382,8 @@ class ClusterInfo: container_ports = [] if "ports" in service_info: for raw_port in [str(p) for p in service_info["ports"]]: - # Parse protocol suffix (e.g., "8001/udp" -> port=8001, protocol=UDP) + # Parse protocol suffix (e.g., "8001/udp" -> port=8001, + # protocol=UDP) protocol = "TCP" port_str = raw_port if "/" in raw_port: @@ -346,31 +393,48 @@ class ClusterInfo: if ":" in port_str: port_str = port_str.split(":")[-1] port = int(port_str) - container_ports.append(client.V1ContainerPort(container_port=port, protocol=protocol)) + container_ports.append( + client.V1ContainerPort( + container_port=port, protocol=protocol + ) + ) if opts.o.debug: print(f"image: {image}") print(f"service ports: {container_ports}") - merged_envs = merge_envs( - envs_from_compose_file( - service_info["environment"], self.environment_variables.map), self.environment_variables.map - ) if "environment" in service_info else self.environment_variables.map + merged_envs = ( + merge_envs( + envs_from_compose_file( + service_info["environment"], self.environment_variables.map + ), + self.environment_variables.map, + ) + if "environment" in service_info + else self.environment_variables.map + ) envs = envs_from_environment_variables_map(merged_envs) if opts.o.debug: print(f"Merged envs: {envs}") # Re-write the image tag for remote deployment # Note self.app_name has the same value as deployment_id - image_to_use = remote_tag_for_image_unique( - image, - self.spec.get_image_registry(), - self.app_name) if self.spec.get_image_registry() is not None else image - volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name) + image_to_use = ( + remote_tag_for_image_unique( + image, self.spec.get_image_registry(), self.app_name + ) + if self.spec.get_image_registry() is not None + else image + ) + volume_mounts = volume_mounts_for_service( + self.parsed_pod_yaml_map, service_name + ) # Handle command/entrypoint from compose file # In docker-compose: entrypoint -> k8s command, command -> k8s args container_command = None container_args = None if "entrypoint" in service_info: entrypoint = service_info["entrypoint"] - container_command = entrypoint if isinstance(entrypoint, list) else [entrypoint] + container_command = ( + entrypoint if isinstance(entrypoint, list) else [entrypoint] + ) if "command" in service_info: cmd = service_info["command"] container_args = cmd if isinstance(cmd, list) else cmd.split() @@ -387,12 +451,16 @@ class ClusterInfo: privileged=self.spec.get_privileged(), capabilities=client.V1Capabilities( add=self.spec.get_capabilities() - ) if self.spec.get_capabilities() else None + ) + if self.spec.get_capabilities() + else None, ), 
resources=to_k8s_resource_requirements(resources), ) containers.append(container) - volumes = volumes_for_pod_files(self.parsed_pod_yaml_map, self.spec, self.app_name) + volumes = volumes_for_pod_files( + self.parsed_pod_yaml_map, self.spec, self.app_name + ) image_pull_secrets = [client.V1LocalObjectReference(name="laconic-registry")] annotations = None @@ -415,55 +483,54 @@ class ClusterInfo: affinities = [] for rule in self.spec.get_node_affinities(): # TODO add some input validation here - label_name = rule['label'] - label_value = rule['value'] - affinities.append(client.V1NodeSelectorTerm( - match_expressions=[client.V1NodeSelectorRequirement( - key=label_name, - operator="In", - values=[label_value] - )] - ) + label_name = rule["label"] + label_value = rule["value"] + affinities.append( + client.V1NodeSelectorTerm( + match_expressions=[ + client.V1NodeSelectorRequirement( + key=label_name, operator="In", values=[label_value] + ) + ] ) + ) affinity = client.V1Affinity( node_affinity=client.V1NodeAffinity( - required_during_scheduling_ignored_during_execution=client.V1NodeSelector( - node_selector_terms=affinities - )) + required_during_scheduling_ignored_during_execution=( + client.V1NodeSelector(node_selector_terms=affinities) + ) ) + ) if self.spec.get_node_tolerations(): tolerations = [] for toleration in self.spec.get_node_tolerations(): # TODO add some input validation here - toleration_key = toleration['key'] - toleration_value = toleration['value'] - tolerations.append(client.V1Toleration( - effect="NoSchedule", - key=toleration_key, - operator="Equal", - value=toleration_value - )) + toleration_key = toleration["key"] + toleration_value = toleration["value"] + tolerations.append( + client.V1Toleration( + effect="NoSchedule", + key=toleration_key, + operator="Equal", + value=toleration_value, + ) + ) template = client.V1PodTemplateSpec( - metadata=client.V1ObjectMeta( - annotations=annotations, - labels=labels - ), + metadata=client.V1ObjectMeta(annotations=annotations, labels=labels), spec=client.V1PodSpec( containers=containers, image_pull_secrets=image_pull_secrets, volumes=volumes, affinity=affinity, - tolerations=tolerations - ), + tolerations=tolerations, + ), ) spec = client.V1DeploymentSpec( replicas=self.spec.get_replicas(), - template=template, selector={ - "matchLabels": - {"app": self.app_name} - } + template=template, + selector={"matchLabels": {"app": self.app_name}}, ) deployment = client.V1Deployment( diff --git a/stack_orchestrator/deploy/k8s/helm/chart_generator.py b/stack_orchestrator/deploy/k8s/helm/chart_generator.py index e2235472..aad3f684 100644 --- a/stack_orchestrator/deploy/k8s/helm/chart_generator.py +++ b/stack_orchestrator/deploy/k8s/helm/chart_generator.py @@ -23,12 +23,12 @@ from stack_orchestrator.util import ( get_pod_file_path, get_job_list, get_job_file_path, - error_exit + error_exit, ) from stack_orchestrator.deploy.k8s.helm.kompose_wrapper import ( check_kompose_available, get_kompose_version, - convert_to_helm_chart + convert_to_helm_chart, ) from stack_orchestrator.util import get_yaml @@ -108,14 +108,17 @@ def _post_process_chart(chart_dir: Path, chart_name: str, jobs: list) -> None: _wrap_job_templates_with_conditionals(chart_dir, jobs) -def generate_helm_chart(stack_path: str, spec_file: str, deployment_dir_path: Path) -> None: +def generate_helm_chart( + stack_path: str, spec_file: str, deployment_dir_path: Path +) -> None: """ Generate a self-sufficient Helm chart from stack compose files using Kompose. 
Args: stack_path: Path to the stack directory spec_file: Path to the deployment spec file - deployment_dir_path: Deployment directory path (already created with deployment.yml) + deployment_dir_path: Deployment directory path + (already created with deployment.yml) Output structure: deployment-dir/ @@ -208,13 +211,14 @@ def generate_helm_chart(stack_path: str, spec_file: str, deployment_dir_path: Pa # 5. Create chart directory and invoke Kompose chart_dir = deployment_dir_path / "chart" - print(f"Converting {len(compose_files)} compose file(s) to Helm chart using Kompose...") + print( + f"Converting {len(compose_files)} compose file(s) to Helm chart " + "using Kompose..." + ) try: output = convert_to_helm_chart( - compose_files=compose_files, - output_dir=chart_dir, - chart_name=chart_name + compose_files=compose_files, output_dir=chart_dir, chart_name=chart_name ) if opts.o.debug: print(f"Kompose output:\n{output}") @@ -291,7 +295,11 @@ Edit the generated template files in `templates/` to customize: print(f" Stack: {stack_path}") # Count generated files - template_files = list((chart_dir / "templates").glob("*.yaml")) if (chart_dir / "templates").exists() else [] + template_files = ( + list((chart_dir / "templates").glob("*.yaml")) + if (chart_dir / "templates").exists() + else [] + ) print(f" Files: {len(template_files)} template(s) generated") print("\nDeployment directory structure:") diff --git a/stack_orchestrator/deploy/k8s/helm/job_runner.py b/stack_orchestrator/deploy/k8s/helm/job_runner.py index 00829971..1a41dacf 100644 --- a/stack_orchestrator/deploy/k8s/helm/job_runner.py +++ b/stack_orchestrator/deploy/k8s/helm/job_runner.py @@ -53,7 +53,7 @@ def run_helm_job( release: str = None, namespace: str = "default", timeout: int = 600, - verbose: bool = False + verbose: bool = False, ) -> None: """ Run a one-time job from a Helm chart. 
@@ -93,22 +93,31 @@ def run_helm_job( print(f"Running job '{job_name}' from helm chart: {chart_dir}") # Use helm template to render the job manifest - with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as tmp_file: + with tempfile.NamedTemporaryFile( + mode="w", suffix=".yaml", delete=False + ) as tmp_file: try: # Render job template with job enabled # Use --set-json to properly handle job names with dashes jobs_dict = {job_name: {"enabled": True}} values_json = json.dumps(jobs_dict) helm_cmd = [ - "helm", "template", release, str(chart_dir), - "--show-only", job_template_file, - "--set-json", f"jobs={values_json}" + "helm", + "template", + release, + str(chart_dir), + "--show-only", + job_template_file, + "--set-json", + f"jobs={values_json}", ] if verbose: print(f"Running: {' '.join(helm_cmd)}") - result = subprocess.run(helm_cmd, check=True, capture_output=True, text=True) + result = subprocess.run( + helm_cmd, check=True, capture_output=True, text=True + ) tmp_file.write(result.stdout) tmp_file.flush() @@ -121,18 +130,30 @@ def run_helm_job( actual_job_name = manifest.get("metadata", {}).get("name", job_name) # Apply the job manifest - kubectl_apply_cmd = ["kubectl", "apply", "-f", tmp_file.name, "-n", namespace] - subprocess.run(kubectl_apply_cmd, check=True, capture_output=True, text=True) + kubectl_apply_cmd = [ + "kubectl", + "apply", + "-f", + tmp_file.name, + "-n", + namespace, + ] + subprocess.run( + kubectl_apply_cmd, check=True, capture_output=True, text=True + ) if verbose: print(f"Job {actual_job_name} created, waiting for completion...") # Wait for job completion wait_cmd = [ - "kubectl", "wait", "--for=condition=complete", + "kubectl", + "wait", + "--for=condition=complete", f"job/{actual_job_name}", f"--timeout={timeout}s", - "-n", namespace + "-n", + namespace, ] subprocess.run(wait_cmd, check=True, capture_output=True, text=True) diff --git a/stack_orchestrator/deploy/k8s/helm/kompose_wrapper.py b/stack_orchestrator/deploy/k8s/helm/kompose_wrapper.py index 18c3b25c..f9e27e7f 100644 --- a/stack_orchestrator/deploy/k8s/helm/kompose_wrapper.py +++ b/stack_orchestrator/deploy/k8s/helm/kompose_wrapper.py @@ -38,10 +38,7 @@ def get_kompose_version() -> str: raise Exception("kompose not found in PATH") result = subprocess.run( - ["kompose", "version"], - capture_output=True, - text=True, - timeout=10 + ["kompose", "version"], capture_output=True, text=True, timeout=10 ) if result.returncode != 0: @@ -55,7 +52,9 @@ def get_kompose_version() -> str: return version -def convert_to_helm_chart(compose_files: List[Path], output_dir: Path, chart_name: str = None) -> str: +def convert_to_helm_chart( + compose_files: List[Path], output_dir: Path, chart_name: str = None +) -> str: """ Invoke kompose to convert Docker Compose files to a Helm chart. 
@@ -92,12 +91,7 @@ def convert_to_helm_chart(compose_files: List[Path], output_dir: Path, chart_nam cmd.extend(["--chart", "-o", str(output_dir)]) # Execute kompose - result = subprocess.run( - cmd, - capture_output=True, - text=True, - timeout=60 - ) + result = subprocess.run(cmd, capture_output=True, text=True, timeout=60) if result.returncode != 0: raise Exception( diff --git a/stack_orchestrator/deploy/k8s/k8s_command.py b/stack_orchestrator/deploy/k8s/k8s_command.py index 506a34fe..76bc76b8 100644 --- a/stack_orchestrator/deploy/k8s/k8s_command.py +++ b/stack_orchestrator/deploy/k8s/k8s_command.py @@ -21,21 +21,21 @@ from stack_orchestrator.deploy.k8s.helpers import get_kind_cluster @click.group() @click.pass_context def command(ctx): - '''k8s cluster management commands''' + """k8s cluster management commands""" pass @command.group() @click.pass_context def list(ctx): - '''list k8s resources''' + """list k8s resources""" pass @list.command() @click.pass_context def cluster(ctx): - '''Show the existing kind cluster''' + """Show the existing kind cluster""" existing_cluster = get_kind_cluster() if existing_cluster: print(existing_cluster) diff --git a/stack_orchestrator/deploy/stack.py b/stack_orchestrator/deploy/stack.py index 39ad0083..75d40705 100644 --- a/stack_orchestrator/deploy/stack.py +++ b/stack_orchestrator/deploy/stack.py @@ -19,7 +19,6 @@ from stack_orchestrator.util import get_yaml class Stack: - name: str obj: typing.Any diff --git a/stack_orchestrator/deploy/webapp/deploy_webapp.py b/stack_orchestrator/deploy/webapp/deploy_webapp.py index c51f0781..6d5ea6c2 100644 --- a/stack_orchestrator/deploy/webapp/deploy_webapp.py +++ b/stack_orchestrator/deploy/webapp/deploy_webapp.py @@ -27,7 +27,9 @@ from stack_orchestrator.deploy.deploy_types import DeployCommandContext def _fixup_container_tag(deployment_dir: str, image: str): deployment_dir_path = Path(deployment_dir) - compose_file = deployment_dir_path.joinpath("compose", "docker-compose-webapp-template.yml") + compose_file = deployment_dir_path.joinpath( + "compose", "docker-compose-webapp-template.yml" + ) # replace "cerc/webapp-container:local" in the file with our image tag with open(compose_file) as rfile: contents = rfile.read() @@ -39,13 +41,13 @@ def _fixup_container_tag(deployment_dir: str, image: str): def _fixup_url_spec(spec_file_name: str, url: str): # url is like: https://example.com/path parsed_url = urlparse(url) - http_proxy_spec = f''' + http_proxy_spec = f""" http-proxy: - host-name: {parsed_url.hostname} routes: - path: '{parsed_url.path if parsed_url.path else "/"}' proxy-to: webapp:80 - ''' + """ spec_file_path = Path(spec_file_name) with open(spec_file_path) as rfile: contents = rfile.read() @@ -54,11 +56,15 @@ def _fixup_url_spec(spec_file_name: str, url: str): wfile.write(contents) -def create_deployment(ctx, deployment_dir, image, url, kube_config, image_registry, env_file): +def create_deployment( + ctx, deployment_dir, image, url, kube_config, image_registry, env_file +): # Do the equivalent of: - # 1. laconic-so --stack webapp-template deploy --deploy-to k8s init --output webapp-spec.yml + # 1. laconic-so --stack webapp-template deploy --deploy-to k8s init \ + # --output webapp-spec.yml # --config (eqivalent of the contents of my-config.env) - # 2. laconic-so --stack webapp-template deploy --deploy-to k8s create --deployment-dir test-deployment + # 2. laconic-so --stack webapp-template deploy --deploy-to k8s create \ + # --deployment-dir test-deployment # --spec-file webapp-spec.yml # 3. 
Replace the container image tag with the specified image deployment_dir_path = Path(deployment_dir) @@ -83,17 +89,12 @@ def create_deployment(ctx, deployment_dir, image, url, kube_config, image_regist kube_config, image_registry, spec_file_name, - None + None, ) # Add the TLS and DNS spec _fixup_url_spec(spec_file_name, url) create_operation( - deploy_command_context, - spec_file_name, - deployment_dir, - False, - None, - None + deploy_command_context, spec_file_name, deployment_dir, False, None, None ) # Fix up the container tag inside the deployment compose file _fixup_container_tag(deployment_dir, image) @@ -103,7 +104,7 @@ def create_deployment(ctx, deployment_dir, image, url, kube_config, image_regist @click.group() @click.pass_context def command(ctx): - '''manage a webapp deployment''' + """manage a webapp deployment""" # Check that --stack wasn't supplied if ctx.parent.obj.stack: @@ -112,13 +113,20 @@ def command(ctx): @command.command() @click.option("--kube-config", help="Provide a config file for a k8s deployment") -@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster") -@click.option("--deployment-dir", help="Create deployment files in this directory", required=True) +@click.option( + "--image-registry", + help="Provide a container image registry url for this k8s cluster", +) +@click.option( + "--deployment-dir", help="Create deployment files in this directory", required=True +) @click.option("--image", help="image to deploy", required=True) @click.option("--url", help="url to serve", required=True) @click.option("--env-file", help="environment file for webapp") @click.pass_context def create(ctx, deployment_dir, image, url, kube_config, image_registry, env_file): - '''create a deployment for the specified webapp container''' + """create a deployment for the specified webapp container""" - return create_deployment(ctx, deployment_dir, image, url, kube_config, image_registry, env_file) + return create_deployment( + ctx, deployment_dir, image, url, kube_config, image_registry, env_file + ) diff --git a/stack_orchestrator/deploy/webapp/deploy_webapp_from_registry.py b/stack_orchestrator/deploy/webapp/deploy_webapp_from_registry.py index 24a529c2..bd9d7450 100644 --- a/stack_orchestrator/deploy/webapp/deploy_webapp_from_registry.py +++ b/stack_orchestrator/deploy/webapp/deploy_webapp_from_registry.py @@ -112,7 +112,8 @@ def process_app_deployment_request( ) elif "preexisting" == fqdn_policy: raise Exception( - f"No pre-existing DnsRecord {dns_lrn} could be found for request {app_deployment_request.id}." + f"No pre-existing DnsRecord {dns_lrn} could be found for " + f"request {app_deployment_request.id}." ) # 4. 
get build and runtime config from request @@ -128,7 +129,8 @@ def process_app_deployment_request( parsed = AttrDict(yaml.safe_load(decrypted.data)) if record_owner not in parsed.authorized: raise Exception( - f"{record_owner} not authorized to access config {app_deployment_request.attributes.config.ref}" + f"{record_owner} not authorized to access config " + f"{app_deployment_request.attributes.config.ref}" ) if "env" in parsed.config: env.update(parsed.config.env) @@ -156,8 +158,10 @@ def process_app_deployment_request( deployment_record = laconic.get_record(app_deployment_lrn) deployment_dir = os.path.join(deployment_parent_dir, fqdn) - # At present we use this to generate a unique but stable ID for the app's host container - # TODO: implement support to derive this transparently from the already-unique deployment id + # At present we use this to generate a unique but stable ID for the + # app's host container + # TODO: implement support to derive this transparently from the + # already-unique deployment id unique_deployment_id = hashlib.md5(fqdn.encode()).hexdigest()[:16] deployment_config_file = os.path.join(deployment_dir, "config.env") deployment_container_tag = "laconic-webapp/%s:local" % unique_deployment_id @@ -166,11 +170,12 @@ def process_app_deployment_request( if not os.path.exists(deployment_dir): if deployment_record: raise Exception( - "Deployment record %s exists, but not deployment dir %s. Please remove name." - % (app_deployment_lrn, deployment_dir) + "Deployment record %s exists, but not deployment dir %s. " + "Please remove name." % (app_deployment_lrn, deployment_dir) ) logger.log( - f"Creating webapp deployment in: {deployment_dir} with container id: {deployment_container_tag}" + f"Creating webapp deployment in: {deployment_dir} " + f"with container id: {deployment_container_tag}" ) deploy_webapp.create_deployment( ctx, @@ -187,7 +192,8 @@ def process_app_deployment_request( needs_k8s_deploy = False if force_rebuild: logger.log( - "--force-rebuild is enabled so the container will always be built now, even if nothing has changed in the app" + "--force-rebuild is enabled so the container will always be " + "built now, even if nothing has changed in the app" ) # 6. build container (if needed) # TODO: add a comment that explains what this code is doing (not clear to me) @@ -199,11 +205,12 @@ def process_app_deployment_request( needs_k8s_deploy = True # check if the image already exists shared_tag_exists = remote_image_exists(image_registry, app_image_shared_tag) - # Note: in the code below, calls to add_tags_to_image() won't work at present. - # This is because SO deployment code in general re-names the container image - # to be unique to the deployment. This is done transparently - # and so when we call add_tags_to_image() here and try to add tags to the remote image, - # we get the image name wrong. Accordingly I've disabled the relevant code for now. + # Note: in the code below, calls to add_tags_to_image() won't + # work at present. This is because SO deployment code in general + # re-names the container image to be unique to the deployment. + # This is done transparently and so when we call add_tags_to_image() + # here and try to add tags to the remote image, we get the image + # name wrong. Accordingly I've disabled the relevant code for now. 
# This is safe because we are running with --force-rebuild at present if shared_tag_exists and not force_rebuild: # simply add our unique tag to the existing image and we are done @@ -211,7 +218,9 @@ def process_app_deployment_request( f"(SKIPPED) Existing image found for this app: {app_image_shared_tag} " "tagging it with: {deployment_container_tag} to use in this deployment" ) - # add_tags_to_image(image_registry, app_image_shared_tag, deployment_container_tag) + # add_tags_to_image( + # image_registry, app_image_shared_tag, deployment_container_tag + # ) logger.log("Tag complete") else: extra_build_args = [] # TODO: pull from request @@ -223,11 +232,15 @@ def process_app_deployment_request( logger.log(f"Pushing container image: {deployment_container_tag}") push_container_image(deployment_dir, logger) logger.log("Push complete") - # The build/push commands above will use the unique deployment tag, so now we need to add the shared tag. + # The build/push commands above will use the unique deployment + # tag, so now we need to add the shared tag. logger.log( - f"(SKIPPED) Adding global app image tag: {app_image_shared_tag} to newly built image: {deployment_container_tag}" + f"(SKIPPED) Adding global app image tag: {app_image_shared_tag} " + f"to newly built image: {deployment_container_tag}" ) - # add_tags_to_image(image_registry, deployment_container_tag, app_image_shared_tag) + # add_tags_to_image( + # image_registry, deployment_container_tag, app_image_shared_tag + # ) logger.log("Tag complete") else: logger.log("Requested app is already deployed, skipping build and image push") @@ -306,7 +319,11 @@ def dump_known_requests(filename, requests, status="SEEN"): help="How to handle requests with an FQDN: prohibit, allow, preexisting", default="prohibit", ) -@click.option("--ip", help="IP address of the k8s deployment (to be set in DNS record)", default=None) +@click.option( + "--ip", + help="IP address of the k8s deployment (to be set in DNS record)", + default=None, +) @click.option("--record-namespace-dns", help="eg, lrn://laconic/dns", required=True) @click.option( "--record-namespace-deployments", @@ -364,7 +381,9 @@ def dump_known_requests(filename, requests, status="SEEN"): "--private-key-file", help="The private key for decrypting config.", required=True ) @click.option( - "--registry-lock-file", help="File path to use for registry mutex lock", default=None + "--registry-lock-file", + help="File path to use for registry mutex lock", + default=None, ) @click.option( "--private-key-passphrase", @@ -421,7 +440,8 @@ def command( # noqa: C901 or not dns_suffix ): print( - "--dns-suffix, --record-namespace-dns, and --record-namespace-deployments are all required", + "--dns-suffix, --record-namespace-dns, and " + "--record-namespace-deployments are all required", file=sys.stderr, ) sys.exit(2) @@ -459,14 +479,17 @@ def command( # noqa: C901 include_tags = [tag.strip() for tag in include_tags.split(",") if tag] exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag] - laconic = LaconicRegistryClient(laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file) + laconic = LaconicRegistryClient( + laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file + ) webapp_deployer_record = laconic.get_record(lrn, require=True) payment_address = webapp_deployer_record.attributes.paymentAddress main_logger.log(f"Payment address: {payment_address}") if min_required_payment and not payment_address: print( - f"Minimum payment required, but no payment address listed 
for deployer: {lrn}.", + f"Minimum payment required, but no payment address listed " + f"for deployer: {lrn}.", file=sys.stderr, ) sys.exit(2) @@ -536,7 +559,8 @@ def command( # noqa: C901 if skip_by_tag(r, include_tags, exclude_tags): main_logger.log( - "Skipping request %s, filtered by tag (include %s, exclude %s, present %s)" + "Skipping request %s, filtered by tag " + "(include %s, exclude %s, present %s)" % (r.id, include_tags, exclude_tags, r.attributes.tags) ) skipped_by_name[requested_name] = r @@ -581,11 +605,13 @@ def command( # noqa: C901 cancellation_requests[r.id], r ): main_logger.log( - f"Found deployment cancellation request for {r.id} at {cancellation_requests[r.id].id}" + f"Found deployment cancellation request for {r.id} " + f"at {cancellation_requests[r.id].id}" ) elif r.id in deployments_by_request: main_logger.log( - f"Found satisfied request for {r.id} at {deployments_by_request[r.id].id}" + f"Found satisfied request for {r.id} " + f"at {deployments_by_request[r.id].id}" ) else: if ( @@ -593,7 +619,8 @@ def command( # noqa: C901 and previous_requests[r.id].get("status", "") != "RETRY" ): main_logger.log( - f"Skipping unsatisfied request {r.id} because we have seen it before." + f"Skipping unsatisfied request {r.id} " + "because we have seen it before." ) else: main_logger.log(f"Request {r.id} needs to processed.") @@ -603,13 +630,7 @@ def command( # noqa: C901 for r in requests_to_check_for_payment: if r.attributes.auction: if auction_requests: - if confirm_auction( - laconic, - r, - lrn, - payment_address, - main_logger - ): + if confirm_auction(laconic, r, lrn, payment_address, main_logger): main_logger.log(f"{r.id}: Auction confirmed.") requests_to_execute.append(r) else: @@ -653,7 +674,10 @@ def command( # noqa: C901 run_log_file = None run_reg_client = laconic try: - run_id = f"{r.id}-{str(time.time()).split('.')[0]}-{str(uuid.uuid4()).split('-')[0]}" + run_id = ( + f"{r.id}-{str(time.time()).split('.')[0]}-" + f"{str(uuid.uuid4()).split('-')[0]}" + ) if log_dir: run_log_dir = os.path.join(log_dir, r.id) if not os.path.exists(run_log_dir): @@ -664,7 +688,9 @@ def command( # noqa: C901 ) run_log_file = open(run_log_file_path, "wt") run_reg_client = LaconicRegistryClient( - laconic_config, log_file=run_log_file, mutex_lock_file=registry_lock_file + laconic_config, + log_file=run_log_file, + mutex_lock_file=registry_lock_file, ) build_logger = TimedLogger(run_id, run_log_file) diff --git a/stack_orchestrator/deploy/webapp/handle_deployment_auction.py b/stack_orchestrator/deploy/webapp/handle_deployment_auction.py index 0a3c65c0..933de899 100644 --- a/stack_orchestrator/deploy/webapp/handle_deployment_auction.py +++ b/stack_orchestrator/deploy/webapp/handle_deployment_auction.py @@ -44,19 +44,27 @@ def process_app_deployment_auction( # Check auction kind if auction.kind != AUCTION_KIND_PROVIDER: - raise Exception(f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction.kind}") + raise Exception( + f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction.kind}" + ) if current_status == "PENDING": # Skip if pending auction not in commit state if auction.status != AuctionStatus.COMMIT: - logger.log(f"Skipping pending request, auction {auction_id} status: {auction.status}") + logger.log( + f"Skipping pending request, auction {auction_id} " + f"status: {auction.status}" + ) return "SKIP", "" # Check max_price bid_amount_int = int(bid_amount) max_price_int = int(auction.maxPrice.quantity) if max_price_int < bid_amount_int: - logger.log(f"Skipping auction 
{auction_id} with max_price ({max_price_int}) less than bid_amount ({bid_amount_int})") + logger.log( + f"Skipping auction {auction_id} with max_price ({max_price_int}) " + f"less than bid_amount ({bid_amount_int})" + ) return "SKIP", "" # Bid on the auction @@ -121,7 +129,9 @@ def dump_known_auction_requests(filename, requests, status="SEEN"): required=True, ) @click.option( - "--registry-lock-file", help="File path to use for registry mutex lock", default=None + "--registry-lock-file", + help="File path to use for registry mutex lock", + default=None, ) @click.option( "--dry-run", help="Don't do anything, just report what would be done.", is_flag=True @@ -142,7 +152,9 @@ def command( logger = TimedLogger(file=sys.stderr) try: - laconic = LaconicRegistryClient(laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file) + laconic = LaconicRegistryClient( + laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file + ) auctions_requests = laconic.app_deployment_auctions() previous_requests = {} @@ -164,7 +176,8 @@ def command( # Handle already seen requests if r.id in previous_requests: - # If it's not in commit or reveal status, skip the request as we've already seen it + # If it's not in commit or reveal status, skip the request as we've + # already seen it current_status = previous_requests[r.id].get("status", "") result_status = current_status if current_status not in ["COMMIT", "REVEAL"]: @@ -172,7 +185,10 @@ def command( continue reveal_file_path = previous_requests[r.id].get("revealFile", "") - logger.log(f"Found existing auction request {r.id} for application {application}, status {current_status}.") + logger.log( + f"Found existing auction request {r.id} for application " + f"{application}, status {current_status}." + ) else: # It's a fresh request, check application record app = laconic.get_record(application) @@ -181,7 +197,10 @@ def command( result_status = "ERROR" continue - logger.log(f"Found pending auction request {r.id} for application {application}.") + logger.log( + f"Found pending auction request {r.id} for application " + f"{application}." + ) # Add requests to be processed requests_to_execute.append((r, result_status, reveal_file_path)) @@ -190,9 +209,15 @@ def command( result_status = "ERROR" logger.log(f"ERROR: examining request {r.id}: " + str(e)) finally: - logger.log(f"DONE: Examining request {r.id} with result {result_status}.") + logger.log( + f"DONE: Examining request {r.id} with result {result_status}." 
+ ) if result_status in ["ERROR"]: - dump_known_auction_requests(state_file, [AttrDict({"id": r.id, "revealFile": reveal_file_path})], result_status) + dump_known_auction_requests( + state_file, + [AttrDict({"id": r.id, "revealFile": reveal_file_path})], + result_status, + ) logger.log(f"Found {len(requests_to_execute)} request(s) to process.") @@ -214,7 +239,11 @@ def command( logger.log(f"ERROR {r.id}:" + str(e)) finally: logger.log(f"Processing {r.id}: END - {result_status}") - dump_known_auction_requests(state_file, [AttrDict({"id": r.id, "revealFile": reveal_file_path})], result_status) + dump_known_auction_requests( + state_file, + [AttrDict({"id": r.id, "revealFile": reveal_file_path})], + result_status, + ) except Exception as e: logger.log("UNCAUGHT ERROR:" + str(e)) raise e diff --git a/stack_orchestrator/deploy/webapp/registry_mutex.py b/stack_orchestrator/deploy/webapp/registry_mutex.py index e464f58d..1d023230 100644 --- a/stack_orchestrator/deploy/webapp/registry_mutex.py +++ b/stack_orchestrator/deploy/webapp/registry_mutex.py @@ -17,7 +17,7 @@ def acquire_lock(client, lock_file_path, timeout): try: # Check if lock file exists and is potentially stale if os.path.exists(lock_file_path): - with open(lock_file_path, 'r') as lock_file: + with open(lock_file_path, "r") as lock_file: timestamp = float(lock_file.read().strip()) # If lock is stale, remove the lock file @@ -25,13 +25,15 @@ def acquire_lock(client, lock_file_path, timeout): print(f"Stale lock detected, removing lock file {lock_file_path}") os.remove(lock_file_path) else: - print(f"Lock file {lock_file_path} exists and is recent, waiting...") + print( + f"Lock file {lock_file_path} exists and is recent, waiting..." + ) time.sleep(LOCK_RETRY_INTERVAL) continue # Try to create a new lock file with the current timestamp fd = os.open(lock_file_path, os.O_CREAT | os.O_EXCL | os.O_RDWR) - with os.fdopen(fd, 'w') as lock_file: + with os.fdopen(fd, "w") as lock_file: lock_file.write(str(time.time())) client.mutex_lock_acquired = True diff --git a/stack_orchestrator/deploy/webapp/request_webapp_deployment.py b/stack_orchestrator/deploy/webapp/request_webapp_deployment.py index 0fb2cff1..09a041e1 100644 --- a/stack_orchestrator/deploy/webapp/request_webapp_deployment.py +++ b/stack_orchestrator/deploy/webapp/request_webapp_deployment.py @@ -57,7 +57,10 @@ def fatal(msg: str): @click.option("--config-ref", help="The ref of an existing config upload to use.") @click.option( "--make-payment", - help="The payment to make (in alnt). The value should be a number or 'auto' to use the deployer's minimum required payment.", + help=( + "The payment to make (in alnt). The value should be a number or " + "'auto' to use the deployer's minimum required payment." 
+ ), ) @click.option( "--use-payment", help="The TX id of an existing, unused payment", default=None @@ -91,7 +94,10 @@ def command( # noqa: C901 sys.exit(2) if auction_id and (make_payment or use_payment): - print("Cannot specify --auction-id with --make-payment or --use-payment", file=sys.stderr) + print( + "Cannot specify --auction-id with --make-payment or --use-payment", + file=sys.stderr, + ) sys.exit(2) if env_file and config_ref: @@ -117,7 +123,10 @@ def command( # noqa: C901 # Cross check app against application in the auction record auction_app = auction_records_by_id[0].attributes.application if auction_app != app: - fatal(f"Requested application {app} does not match application from auction record {auction_app}") + fatal( + f"Requested application {app} does not match application " + f"from auction record {auction_app}" + ) # Fetch auction details auction = laconic.get_auction(auction_id) @@ -130,7 +139,9 @@ def command( # noqa: C901 # Check auction kind if auction.kind != AUCTION_KIND_PROVIDER: - fatal(f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction.kind}") + fatal( + f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction.kind}" + ) # Check auction status if auction.status != AuctionStatus.COMPLETED: @@ -145,9 +156,14 @@ def command( # noqa: C901 # Get deployer record for all the auction winners for auction_winner in auction_winners: # TODO: Match auction winner address with provider address? - deployer_records_by_owner = laconic.webapp_deployers({"paymentAddress": auction_winner}) + deployer_records_by_owner = laconic.webapp_deployers( + {"paymentAddress": auction_winner} + ) if len(deployer_records_by_owner) == 0: - print(f"WARNING: Unable to locate deployer for auction winner {auction_winner}") + print( + f"WARNING: Unable to locate deployer for auction winner " + f"{auction_winner}" + ) # Take first record with name set target_deployer_record = deployer_records_by_owner[0] diff --git a/stack_orchestrator/deploy/webapp/request_webapp_undeployment.py b/stack_orchestrator/deploy/webapp/request_webapp_undeployment.py index 80cee3ce..3f64bd01 100644 --- a/stack_orchestrator/deploy/webapp/request_webapp_undeployment.py +++ b/stack_orchestrator/deploy/webapp/request_webapp_undeployment.py @@ -17,7 +17,7 @@ import sys import click import yaml -from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient) +from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient def fatal(msg: str): @@ -30,18 +30,19 @@ def fatal(msg: str): "--laconic-config", help="Provide a config file for laconicd", required=True ) @click.option( - "--deployer", - help="The LRN of the deployer to process this request.", - required=True + "--deployer", help="The LRN of the deployer to process this request.", required=True ) @click.option( "--deployment", - help="Deployment record (ApplicationDeploymentRecord) id of the deployment to remove.", + help="Deployment record (ApplicationDeploymentRecord) id of the deployment.", required=True, ) @click.option( "--make-payment", - help="The payment to make (in alnt). The value should be a number or 'auto' to use the deployer's minimum required payment.", + help=( + "The payment to make (in alnt). The value should be a number or " + "'auto' to use the deployer's minimum required payment." 
+ ), ) @click.option( "--use-payment", help="The TX id of an existing, unused payment", default=None diff --git a/stack_orchestrator/deploy/webapp/run_webapp.py b/stack_orchestrator/deploy/webapp/run_webapp.py index f780c6f8..d02c997b 100644 --- a/stack_orchestrator/deploy/webapp/run_webapp.py +++ b/stack_orchestrator/deploy/webapp/run_webapp.py @@ -18,7 +18,8 @@ # env vars: # CERC_REPO_BASE_DIR defaults to ~/cerc -# TODO: display the available list of containers; allow re-build of either all or specific containers +# TODO: display the available list of containers; allow re-build of either +# all or specific containers import hashlib import click @@ -36,7 +37,7 @@ WEBAPP_PORT = 80 @click.option("--port", help="port to use (default random)") @click.pass_context def command(ctx, image, env_file, port): - '''run the specified webapp container''' + """run the specified webapp container""" env = {} if env_file: @@ -46,20 +47,35 @@ def command(ctx, image, env_file, port): hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest() cluster = f"laconic-webapp-{hash}" - deployer = getDeployer(type=constants.compose_deploy_type, - deployment_context=None, - compose_files=None, - compose_project_name=cluster, - compose_env_file=None) + deployer = getDeployer( + type=constants.compose_deploy_type, + deployment_context=None, + compose_files=None, + compose_project_name=cluster, + compose_env_file=None, + ) ports = [] if port: ports = [(port, WEBAPP_PORT)] - container = deployer.run(image, command=[], user=None, volumes=[], entrypoint=None, env=env, ports=ports, detach=True) + container = deployer.run( + image, + command=[], + user=None, + volumes=[], + entrypoint=None, + env=env, + ports=ports, + detach=True, + ) # Make configurable? webappPort = f"{WEBAPP_PORT}/tcp" # TODO: This assumes a Docker container object... if webappPort in container.network_settings.ports: mapping = container.network_settings.ports[webappPort][0] - print(f"""Image: {image}\nID: {container.id}\nURL: http://localhost:{mapping['HostPort']}""") + print( + f"Image: {image}\n" + f"ID: {container.id}\n" + f"URL: http://localhost:{mapping['HostPort']}" + ) diff --git a/stack_orchestrator/deploy/webapp/undeploy_webapp_from_registry.py b/stack_orchestrator/deploy/webapp/undeploy_webapp_from_registry.py index 90e62197..247e432f 100644 --- a/stack_orchestrator/deploy/webapp/undeploy_webapp_from_registry.py +++ b/stack_orchestrator/deploy/webapp/undeploy_webapp_from_registry.py @@ -51,7 +51,8 @@ def process_app_removal_request( if not os.path.exists(deployment_dir): raise Exception("Deployment directory %s does not exist." % deployment_dir) - # Check if the removal request is from the owner of the DnsRecord or deployment record. + # Check if the removal request is from the owner of the DnsRecord or + # deployment record. matched_owner = match_owner(app_removal_request, deployment_record, dns_record) # Or of the original deployment request. @@ -69,9 +70,10 @@ def process_app_removal_request( % (deployment_record.id, app_removal_request.id) ) - # TODO(telackey): Call the function directly. The easiest way to build the correct click context is to - # exec the process, but it would be better to refactor so we could just call down_operation with the - # necessary parameters + # TODO(telackey): Call the function directly. 
The easiest way to build + # the correct click context is to exec the process, but it would be better + # to refactor so we could just call down_operation with the necessary + # parameters down_command = [sys.argv[0], "deployment", "--dir", deployment_dir, "down"] if delete_volumes: down_command.append("--delete-volumes") @@ -179,7 +181,9 @@ def dump_known_requests(filename, requests): is_flag=True, ) @click.option( - "--registry-lock-file", help="File path to use for registry mutex lock", default=None + "--registry-lock-file", + help="File path to use for registry mutex lock", + default=None, ) @click.pass_context def command( # noqa: C901 @@ -216,14 +220,17 @@ def command( # noqa: C901 include_tags = [tag.strip() for tag in include_tags.split(",") if tag] exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag] - laconic = LaconicRegistryClient(laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file) + laconic = LaconicRegistryClient( + laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file + ) deployer_record = laconic.get_record(lrn, require=True) payment_address = deployer_record.attributes.paymentAddress main_logger.log(f"Payment address: {payment_address}") if min_required_payment and not payment_address: print( - f"Minimum payment required, but no payment address listed for deployer: {lrn}.", + f"Minimum payment required, but no payment address listed " + f"for deployer: {lrn}.", file=sys.stderr, ) sys.exit(2) @@ -286,21 +293,25 @@ def command( # noqa: C901 try: if r.attributes.deployment not in named_deployments: main_logger.log( - f"Skipping removal request {r.id} for {r.attributes.deployment} because it does" - f"not appear to refer to a live, named deployment." + f"Skipping removal request {r.id} for " + f"{r.attributes.deployment} because it does not appear to " + "refer to a live, named deployment." ) elif skip_by_tag(r, include_tags, exclude_tags): main_logger.log( - "Skipping removal request %s, filtered by tag (include %s, exclude %s, present %s)" + "Skipping removal request %s, filtered by tag " + "(include %s, exclude %s, present %s)" % (r.id, include_tags, exclude_tags, r.attributes.tags) ) elif r.id in removals_by_request: main_logger.log( - f"Found satisfied request for {r.id} at {removals_by_request[r.id].id}" + f"Found satisfied request for {r.id} " + f"at {removals_by_request[r.id].id}" ) elif r.attributes.deployment in removals_by_deployment: main_logger.log( - f"Found removal record for indicated deployment {r.attributes.deployment} at " + f"Found removal record for indicated deployment " + f"{r.attributes.deployment} at " f"{removals_by_deployment[r.attributes.deployment].id}" ) else: @@ -309,7 +320,8 @@ def command( # noqa: C901 requests_to_check_for_payment.append(r) else: main_logger.log( - f"Skipping unsatisfied request {r.id} because we have seen it before." + f"Skipping unsatisfied request {r.id} " + "because we have seen it before." 
) except Exception as e: main_logger.log(f"ERROR examining {r.id}: {e}") diff --git a/stack_orchestrator/deploy/webapp/util.py b/stack_orchestrator/deploy/webapp/util.py index 991dd249..302e0e3a 100644 --- a/stack_orchestrator/deploy/webapp/util.py +++ b/stack_orchestrator/deploy/webapp/util.py @@ -497,7 +497,7 @@ class LaconicRegistryClient: "--max-price", str(auction["max_price"]), "--num-providers", - str(auction["num_providers"]) + str(auction["num_providers"]), ] return json.loads(logged_cmd(self.log_file, *args))["auctionId"] @@ -561,7 +561,8 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None): extra_build_args = [] tmpdir = tempfile.mkdtemp() - # TODO: determine if this code could be calling into the Python git library like setup-repositories + # TODO: determine if this code could be calling into the Python git + # library like setup-repositories try: record_id = app_record["id"] ref = app_record.attributes.repository_ref @@ -570,7 +571,8 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None): logger.log(f"Cloning repository {repo} to {clone_dir} ...") # Set github credentials if present running a command like: - # git config --global url."https://${TOKEN}:@github.com/".insteadOf "https://github.com/" + # git config --global url."https://${TOKEN}:@github.com/".insteadOf + # "https://github.com/" github_token = os.environ.get("DEPLOYER_GITHUB_TOKEN") if github_token: logger.log("Github token detected, setting it in the git environment") @@ -612,7 +614,8 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None): logger.log(f"git checkout failed. Does ref {ref} exist?") raise e else: - # TODO: why is this code different vs the branch above (run vs check_call, and no prompt disable)? + # TODO: why is this code different vs the branch above (run vs check_call, + # and no prompt disable)? result = subprocess.run( ["git", "clone", "--depth", "1", repo, clone_dir], stdout=logger.file, @@ -749,9 +752,13 @@ def publish_deployment( # Set auction or payment id from request if app_deployment_request.attributes.auction: - new_deployment_record["record"]["auction"] = app_deployment_request.attributes.auction + new_deployment_record["record"][ + "auction" + ] = app_deployment_request.attributes.auction elif app_deployment_request.attributes.payment: - new_deployment_record["record"]["payment"] = app_deployment_request.attributes.payment + new_deployment_record["record"][ + "payment" + ] = app_deployment_request.attributes.payment if webapp_deployer_record: new_deployment_record["record"]["deployer"] = webapp_deployer_record.names[0] @@ -801,7 +808,9 @@ def skip_by_tag(r, include_tags, exclude_tags): return False -def confirm_payment(laconic: LaconicRegistryClient, record, payment_address, min_amount, logger): +def confirm_payment( + laconic: LaconicRegistryClient, record, payment_address, min_amount, logger +): req_owner = laconic.get_owner(record) if req_owner == payment_address: # No need to confirm payment if the sender and recipient are the same account. 
@@ -818,27 +827,30 @@ def confirm_payment(laconic: LaconicRegistryClient, record, payment_address, min
 
     if tx.code != 0:
         logger.log(
-            f"{record.id}: payment tx {tx.hash} was not successful - code: {tx.code}, log: {tx.log}"
+            f"{record.id}: payment tx {tx.hash} was not successful - "
+            f"code: {tx.code}, log: {tx.log}"
         )
         return False
 
     if tx.sender != req_owner:
         logger.log(
-            f"{record.id}: payment sender {tx.sender} in tx {tx.hash} does not match deployment "
-            f"request owner {req_owner}"
+            f"{record.id}: payment sender {tx.sender} in tx {tx.hash} "
+            f"does not match deployment request owner {req_owner}"
         )
         return False
 
     if tx.recipient != payment_address:
         logger.log(
-            f"{record.id}: payment recipient {tx.recipient} in tx {tx.hash} does not match {payment_address}"
+            f"{record.id}: payment recipient {tx.recipient} in tx {tx.hash} "
+            f"does not match {payment_address}"
         )
         return False
 
     pay_denom = "".join([i for i in tx.amount if not i.isdigit()])
     if pay_denom != "alnt":
         logger.log(
-            f"{record.id}: {pay_denom} in tx {tx.hash} is not an expected payment denomination"
+            f"{record.id}: {pay_denom} in tx {tx.hash} is not an expected "
+            "payment denomination"
         )
         return False
 
@@ -859,7 +871,10 @@ def confirm_payment(laconic: LaconicRegistryClient, record, payment_address, min
 
         # Check that payment was used for deployment of same application
        if record.attributes.application != used_request.attributes.application:
-            logger.log(f"{record.id}: payment {tx.hash} already used on a different application deployment {used}")
+            logger.log(
+                f"{record.id}: payment {tx.hash} already used on a different "
+                f"application deployment {used}"
+            )
             return False
 
     used = laconic.app_deployment_removals(
@@ -874,7 +889,9 @@ def confirm_payment(laconic: LaconicRegistryClient, record, payment_address, min
     return True
 
 
-def confirm_auction(laconic: LaconicRegistryClient, record, deployer_lrn, payment_address, logger):
+def confirm_auction(
+    laconic: LaconicRegistryClient, record, deployer_lrn, payment_address, logger
+):
     auction_id = record.attributes.auction
     auction = laconic.get_auction(auction_id)
 
@@ -886,11 +903,14 @@ def confirm_auction(laconic: LaconicRegistryClient, record, deployer_lrn, paymen
 
     # Cross check app against application in the auction record
     requested_app = laconic.get_record(record.attributes.application, require=True)
-    auction_app = laconic.get_record(auction_records_by_id[0].attributes.application, require=True)
+    auction_app = laconic.get_record(
+        auction_records_by_id[0].attributes.application, require=True
+    )
     if requested_app.id != auction_app.id:
         logger.log(
-            f"{record.id}: requested application {record.attributes.application} does not match application from "
-            f"auction record {auction_records_by_id[0].attributes.application}"
+            f"{record.id}: requested application {record.attributes.application} "
+            f"does not match application from auction record "
+            f"{auction_records_by_id[0].attributes.application}"
         )
         return False
 
diff --git a/stack_orchestrator/main.py b/stack_orchestrator/main.py
index a50c7c9b..826ef4ff 100644
--- a/stack_orchestrator/main.py
+++ b/stack_orchestrator/main.py
@@ -21,37 +21,41 @@ from stack_orchestrator.repos import fetch_stack
 from stack_orchestrator.build import build_containers, fetch_containers
 from stack_orchestrator.build import build_npms
 from stack_orchestrator.build import build_webapp
-from stack_orchestrator.deploy.webapp import (run_webapp,
-                                              deploy_webapp,
-                                              deploy_webapp_from_registry,
-                                              undeploy_webapp_from_registry,
-                                              publish_webapp_deployer,
-                                              publish_deployment_auction,
-                                              handle_deployment_auction,
-                                              request_webapp_deployment,
-                                              request_webapp_undeployment)
+from stack_orchestrator.deploy.webapp import (
+    run_webapp,
+    deploy_webapp,
+    deploy_webapp_from_registry,
+    undeploy_webapp_from_registry,
+    publish_webapp_deployer,
+    publish_deployment_auction,
+    handle_deployment_auction,
+    request_webapp_deployment,
+    request_webapp_undeployment,
+)
 from stack_orchestrator.deploy import deploy
 from stack_orchestrator import version
 from stack_orchestrator.deploy import deployment
 from stack_orchestrator import opts
 from stack_orchestrator import update
 
 
-CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
+CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
 
 
 @click.group(context_settings=CONTEXT_SETTINGS)
-@click.option('--stack', help="specify a stack to build/deploy")
-@click.option('--quiet', is_flag=True, default=False)
-@click.option('--verbose', is_flag=True, default=False)
-@click.option('--dry-run', is_flag=True, default=False)
-@click.option('--local-stack', is_flag=True, default=False)
-@click.option('--debug', is_flag=True, default=False)
-@click.option('--continue-on-error', is_flag=True, default=False)
+@click.option("--stack", help="specify a stack to build/deploy")
+@click.option("--quiet", is_flag=True, default=False)
+@click.option("--verbose", is_flag=True, default=False)
+@click.option("--dry-run", is_flag=True, default=False)
+@click.option("--local-stack", is_flag=True, default=False)
+@click.option("--debug", is_flag=True, default=False)
+@click.option("--continue-on-error", is_flag=True, default=False)
 # See: https://click.palletsprojects.com/en/8.1.x/complex/#building-a-git-clone
 @click.pass_context
 def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error):
     """Laconic Stack Orchestrator"""
-    command_options = CommandOptions(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error)
+    command_options = CommandOptions(
+        stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error
+    )
     opts.opts.o = command_options
     ctx.obj = command_options
diff --git a/stack_orchestrator/repos/fetch_stack.py b/stack_orchestrator/repos/fetch_stack.py
index 9566e48f..d4d542bd 100644
--- a/stack_orchestrator/repos/fetch_stack.py
+++ b/stack_orchestrator/repos/fetch_stack.py
@@ -29,13 +29,13 @@ from stack_orchestrator.util import error_exit
 
 
 @click.command()
-@click.argument('stack-locator')
-@click.option('--git-ssh', is_flag=True, default=False)
-@click.option('--check-only', is_flag=True, default=False)
-@click.option('--pull', is_flag=True, default=False)
+@click.argument("stack-locator")
+@click.option("--git-ssh", is_flag=True, default=False)
+@click.option("--check-only", is_flag=True, default=False)
+@click.option("--pull", is_flag=True, default=False)
 @click.pass_context
 def command(ctx, stack_locator, git_ssh, check_only, pull):
-    '''optionally resolve then git clone a repository containing one or more stack definitions'''
+    """Optionally resolve then git clone a repository with stack definitions."""
     dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
     if not opts.o.quiet:
         print(f"Dev Root is: {dev_root_path}")
diff --git a/stack_orchestrator/repos/setup_repositories.py b/stack_orchestrator/repos/setup_repositories.py
index 83075647..761d54ab 100644
--- a/stack_orchestrator/repos/setup_repositories.py
+++ b/stack_orchestrator/repos/setup_repositories.py
@@ -25,15 +25,20 @@ from tqdm import tqdm
 import click
 import importlib.resources
 from stack_orchestrator.opts import opts
-from stack_orchestrator.util import get_parsed_stack_config, include_exclude_check, error_exit, warn_exit
+from stack_orchestrator.util import (
+    get_parsed_stack_config,
+    include_exclude_check,
+    error_exit,
+    warn_exit,
+)
 
 
 class GitProgress(git.RemoteProgress):
     def __init__(self):
         super().__init__()
-        self.pbar = tqdm(unit='B', ascii=True, unit_scale=True)
+        self.pbar = tqdm(unit="B", ascii=True, unit_scale=True)
 
-    def update(self, op_code, cur_count, max_count=None, message=''):
+    def update(self, op_code, cur_count, max_count=None, message=""):
         self.pbar.total = max_count
         self.pbar.n = cur_count
         self.pbar.refresh()
@@ -46,14 +51,16 @@ def is_git_repo(path):
     except git.exc.InvalidGitRepositoryError:
         return False
 
+
 # TODO: find a place for this in the context of click
 # parser = argparse.ArgumentParser(
-# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
+# epilog="Config provided either in .env or settings.ini or env vars: "
+# "CERC_REPO_BASE_DIR (defaults to ~/cerc)"
 # )
 
 
 def branch_strip(s):
-    return s.split('@')[0]
+    return s.split("@")[0]
 
 
 def host_and_path_for_repo(fully_qualified_repo):
@@ -74,43 +81,64 @@ def _get_repo_current_branch_or_tag(full_filesystem_repo_path):
     current_repo_branch_or_tag = "***UNDETERMINED***"
     is_branch = False
     try:
-        current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).active_branch.name
+        current_repo_branch_or_tag = git.Repo(
+            full_filesystem_repo_path
+        ).active_branch.name
         is_branch = True
     except TypeError:
         # This means that the current ref is not a branch, so possibly a tag
         # Let's try to get the tag
         try:
-            current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).git.describe("--tags", "--exact-match")
-            # Note that git is asymmetric -- the tag you told it to check out may not be the one
-            # you get back here (if there are multiple tags associated with the same commit)
+            current_repo_branch_or_tag = git.Repo(
+                full_filesystem_repo_path
+            ).git.describe("--tags", "--exact-match")
+            # Note that git is asymmetric -- the tag you told it to check out
+            # may not be the one you get back here (if there are multiple tags
+            # associated with the same commit)
         except GitCommandError:
-            # If there is no matching branch or tag checked out, just use the current SHA
-            current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).commit("HEAD").hexsha
+            # If there is no matching branch or tag checked out, just use the current
+            # SHA
+            current_repo_branch_or_tag = (
+                git.Repo(full_filesystem_repo_path).commit("HEAD").hexsha
+            )
     return current_repo_branch_or_tag, is_branch
 
 
 # TODO: fix the messy arg list here
-def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
+def process_repo(
+    pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo
+):
     if opts.o.verbose:
         print(f"Processing repo: {fully_qualified_repo}")
     repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
     git_ssh_prefix = f"git@{repo_host}:"
     git_http_prefix = f"https://{repo_host}/"
-    full_github_repo_path = f"{git_ssh_prefix if git_ssh else git_http_prefix}{repo_path}"
+    full_github_repo_path = (
+        f"{git_ssh_prefix if git_ssh else git_http_prefix}{repo_path}"
+    )
     repoName = repo_path.split("/")[-1]
     full_filesystem_repo_path = os.path.join(dev_root_path, repoName)
     is_present = os.path.isdir(full_filesystem_repo_path)
-    (current_repo_branch_or_tag, is_branch) = _get_repo_current_branch_or_tag(
-        full_filesystem_repo_path
-    ) if is_present else (None, None)
+    (current_repo_branch_or_tag, is_branch) = (
+        _get_repo_current_branch_or_tag(full_filesystem_repo_path)
+        if is_present
+        else (None, None)
+    )
     if not opts.o.quiet:
-        present_text = f"already exists active {'branch' if is_branch else 'ref'}: {current_repo_branch_or_tag}" if is_present \
-            else 'Needs to be fetched'
+        present_text = (
+            f"already exists active {'branch' if is_branch else 'ref'}: "
+            f"{current_repo_branch_or_tag}"
+            if is_present
+            else "Needs to be fetched"
+        )
         print(f"Checking: {full_filesystem_repo_path}: {present_text}")
         # Quick check that it's actually a repo
     if is_present:
         if not is_git_repo(full_filesystem_repo_path):
-            print(f"Error: {full_filesystem_repo_path} does not contain a valid git repository")
+            print(
+                f"Error: {full_filesystem_repo_path} does not contain "
+                "a valid git repository"
+            )
             sys.exit(1)
         else:
             if pull:
@@ -128,11 +156,16 @@ def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully
     if not is_present:
         # Clone
         if opts.o.verbose:
-            print(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
+            print(
+                f"Running git clone for {full_github_repo_path} "
+                f"into {full_filesystem_repo_path}"
+            )
         if not opts.o.dry_run:
-            git.Repo.clone_from(full_github_repo_path,
-                                full_filesystem_repo_path,
-                                progress=None if opts.o.quiet else GitProgress())
+            git.Repo.clone_from(
+                full_github_repo_path,
+                full_filesystem_repo_path,
+                progress=None if opts.o.quiet else GitProgress(),
+            )
         else:
             print("(git clone skipped)")
     # Checkout the requested branch, if one was specified
@@ -150,9 +183,9 @@ def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully
 
     if branch_to_checkout:
         if current_repo_branch_or_tag is None or (
-            current_repo_branch_or_tag and (
-                current_repo_branch_or_tag != branch_to_checkout)
-        ):
+            current_repo_branch_or_tag
+            and (current_repo_branch_or_tag != branch_to_checkout)
+        ):
             if not opts.o.quiet:
                 print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
             git_repo = git.Repo(full_filesystem_repo_path)
@@ -180,14 +213,14 @@ def parse_branches(branches_string):
 
 @click.command()
 @click.option("--include", help="only clone these repositories")
-@click.option("--exclude", help="don\'t clone these repositories")
-@click.option('--git-ssh', is_flag=True, default=False)
-@click.option('--check-only', is_flag=True, default=False)
-@click.option('--pull', is_flag=True, default=False)
+@click.option("--exclude", help="don't clone these repositories")
+@click.option("--git-ssh", is_flag=True, default=False)
+@click.option("--check-only", is_flag=True, default=False)
+@click.option("--pull", is_flag=True, default=False)
 @click.option("--branches", help="override branches for repositories")
 @click.pass_context
 def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
-    '''git clone the set of repositories required to build the complete system from source'''
+    """git clone the set of repositories required to build the system."""
 
     quiet = opts.o.quiet
     verbose = opts.o.verbose
@@ -204,22 +237,30 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
     local_stack = ctx.obj.local_stack
 
     if local_stack:
-        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
-        print(f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}")
+        dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
+        print(
+            f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
+            f"{dev_root_path}"
+        )
     else:
-        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+        dev_root_path = os.path.expanduser(
+            config("CERC_REPO_BASE_DIR", default="~/cerc")
+        )
 
     if not quiet:
         print(f"Dev Root is: {dev_root_path}")
 
     if not os.path.isdir(dev_root_path):
         if not quiet:
-            print('Dev root directory doesn\'t exist, creating')
+            print("Dev root directory doesn't exist, creating")
         os.makedirs(dev_root_path)
 
     # See: https://stackoverflow.com/a/20885799/1701505
     from stack_orchestrator import data
-    with importlib.resources.open_text(data, "repository-list.txt") as repository_list_file:
+
+    with importlib.resources.open_text(
+        data, "repository-list.txt"
+    ) as repository_list_file:
         all_repos = repository_list_file.read().splitlines()
 
     repos_in_scope = []
diff --git a/stack_orchestrator/update.py b/stack_orchestrator/update.py
index a41eabae..85fb8b41 100644
--- a/stack_orchestrator/update.py
+++ b/stack_orchestrator/update.py
@@ -29,7 +29,7 @@ from stack_orchestrator.util import get_yaml
 
 
 def _download_url(url: str, file_path: Path):
     r = requests.get(url, stream=True)
     r.raw.decode_content = True
-    with open(file_path, 'wb') as f:
+    with open(file_path, "wb") as f:
         shutil.copyfileobj(r.raw, f)
@@ -40,12 +40,14 @@ def _error_exit(s: str):
 
 # Note at present this probably won't work on non-Unix based OSes like Windows
 @click.command()
-@click.option("--check-only", is_flag=True, default=False, help="only check, don't update")
+@click.option(
+    "--check-only", is_flag=True, default=False, help="only check, don't update"
+)
 @click.pass_context
 def command(ctx, check_only):
-    '''update shiv binary from a distribution url'''
+    """update shiv binary from a distribution url"""
     # Get the distribution URL from config
-    config_key = 'distribution-url'
+    config_key = "distribution-url"
     config_file_path = Path(os.path.expanduser("~/.laconic-so/config.yml"))
     if not config_file_path.exists():
         _error_exit(f"Error: Config file: {config_file_path} not found")
@@ -59,7 +61,9 @@ def command(ctx, check_only):
         _error_exit(f"ERROR: distribution url: {distribution_url} is not valid")
     # Figure out the filename for ourselves
     shiv_binary_path = Path(sys.argv[0])
-    timestamp_filename = f"laconic-so-download-{datetime.datetime.now().strftime('%y%m%d-%H%M%S')}"
+    timestamp_filename = (
+        f"laconic-so-download-{datetime.datetime.now().strftime('%y%m%d-%H%M%S')}"
+    )
     temp_download_path = shiv_binary_path.parent.joinpath(timestamp_filename)
     # Download the file to a temp filename
     if ctx.obj.verbose:
@@ -87,4 +91,4 @@ def command(ctx, check_only):
         print(f"Replacing: {shiv_binary_path} with {temp_download_path}")
     os.replace(temp_download_path, shiv_binary_path)
     if not ctx.obj.quiet:
-        print("Run \"laconic-so version\" to see the newly installed version")
+        print('Run "laconic-so version" to see the newly installed version')
diff --git a/stack_orchestrator/util.py b/stack_orchestrator/util.py
index a7fa510c..f1478060 100644
--- a/stack_orchestrator/util.py
+++ b/stack_orchestrator/util.py
@@ -38,8 +38,10 @@ def get_stack_path(stack):
     if stack_is_external(stack):
         stack_path = Path(stack)
     else:
-        # In order to be compatible with Python 3.8 we need to use this hack to get the path:
-        # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+        # In order to be compatible with Python 3.8 we need to use this hack
+        # to get the path:
+        # See: https://stackoverflow.com/questions/25389095/
+        # python-get-path-of-root-project-structure
         stack_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack)
     return stack_path
 
@@ -47,10 +49,15 @@ def get_stack_path(stack):
 
 def get_dev_root_path(ctx):
     if ctx and ctx.local_stack:
         # TODO: This code probably doesn't work
-        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
-        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
+        dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
+        print(
+            f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
+            f"{dev_root_path}"
+        )
     else:
-        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+        dev_root_path = os.path.expanduser(
+            config("CERC_REPO_BASE_DIR", default="~/cerc")
+        )
     return dev_root_path
 
@@ -102,7 +109,9 @@ def get_plugin_code_paths(stack) -> List[Path]:
         if type(pod) is str:
             result.add(get_stack_path(stack))
         else:
-            pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
+            pod_root_dir = os.path.join(
+                get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"]
+            )
             result.add(Path(os.path.join(pod_root_dir, "stack")))
     return list(result)
 
@@ -157,7 +166,11 @@ def get_pod_file_path(stack, parsed_stack, pod_name: str):
     else:
         for pod in pods:
             if pod["name"] == pod_name:
-                pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
+                pod_root_dir = os.path.join(
+                    get_dev_root_path(None),
+                    pod["repository"].split("/")[-1],
+                    pod["path"],
+                )
                 result = os.path.join(pod_root_dir, "docker-compose.yml")
     return result
 
@@ -180,7 +193,11 @@ def get_pod_script_paths(parsed_stack, pod_name: str):
     if not type(pods[0]) is str:
         for pod in pods:
             if pod["name"] == pod_name:
-                pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
+                pod_root_dir = os.path.join(
+                    get_dev_root_path(None),
+                    pod["repository"].split("/")[-1],
+                    pod["path"],
+                )
                 if "pre_start_command" in pod:
                     result.append(os.path.join(pod_root_dir, pod["pre_start_command"]))
                 if "post_start_command" in pod:
@@ -201,7 +218,8 @@ def pod_has_scripts(parsed_stack, pod_name: str):
 
 def get_internal_compose_file_dir():
     # TODO: refactor to use common code with deploy command
-    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+    # See:
+    # https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
     data_dir = Path(__file__).absolute().parent.joinpath("data")
     source_compose_dir = data_dir.joinpath("compose")
     return source_compose_dir
diff --git a/stack_orchestrator/version.py b/stack_orchestrator/version.py
index 541e5580..67bb6b13 100644
--- a/stack_orchestrator/version.py
+++ b/stack_orchestrator/version.py
@@ -20,10 +20,11 @@ from importlib import resources, metadata
 
 @click.command()
 @click.pass_context
 def command(ctx):
-    '''print tool version'''
+    """print tool version"""
     # See: https://stackoverflow.com/a/20885799/1701505
     from stack_orchestrator import data
+
     if resources.is_resource(data, "build_tag.txt"):
         with resources.open_text(data, "build_tag.txt") as version_file:
             # TODO: code better version that skips comment lines