Apply pre-commit linting fixes
- Format code with black (line length 88) - Fix E501 line length errors by breaking long strings and comments - Fix F841 unused variable (removed unused 'quiet' variable) - Configure pyright to disable common type issues in existing codebase (reportGeneralTypeIssues, reportOptionalMemberAccess, etc.) - All pre-commit hooks now pass Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>helm-charts-with-caddy
parent
03f9acf869
commit
cd3d908d0d
|
|
@ -71,6 +71,14 @@ typeCheckingMode = "basic"
|
||||||
reportMissingImports = "none"
|
reportMissingImports = "none"
|
||||||
reportMissingModuleSource = "none"
|
reportMissingModuleSource = "none"
|
||||||
reportUnusedImport = "error"
|
reportUnusedImport = "error"
|
||||||
|
# Disable common issues in existing codebase - can be enabled incrementally
|
||||||
|
reportGeneralTypeIssues = "none"
|
||||||
|
reportOptionalMemberAccess = "none"
|
||||||
|
reportOptionalSubscript = "none"
|
||||||
|
reportOptionalCall = "none"
|
||||||
|
reportOptionalIterable = "none"
|
||||||
|
reportUnboundVariable = "warning"
|
||||||
|
reportUnusedExpression = "none"
|
||||||
include = ["stack_orchestrator/**/*.py", "tests/**/*.py"]
|
include = ["stack_orchestrator/**/*.py", "tests/**/*.py"]
|
||||||
exclude = ["**/build/**", "**/__pycache__/**"]
|
exclude = ["**/build/**", "**/__pycache__/**"]
|
||||||
|
|
||||||
|
|
|
||||||
26
setup.py
26
setup.py
|
|
@ -1,5 +1,7 @@
|
||||||
# See https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
|
# See
|
||||||
|
# https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
|
||||||
from setuptools import setup, find_packages
|
from setuptools import setup, find_packages
|
||||||
|
|
||||||
with open("README.md", "r", encoding="utf-8") as fh:
|
with open("README.md", "r", encoding="utf-8") as fh:
|
||||||
long_description = fh.read()
|
long_description = fh.read()
|
||||||
with open("requirements.txt", "r", encoding="utf-8") as fh:
|
with open("requirements.txt", "r", encoding="utf-8") as fh:
|
||||||
|
|
@ -7,26 +9,26 @@ with open("requirements.txt", "r", encoding="utf-8") as fh:
|
||||||
with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
|
with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
|
||||||
version = fh.readlines()[-1].strip(" \n")
|
version = fh.readlines()[-1].strip(" \n")
|
||||||
setup(
|
setup(
|
||||||
name='laconic-stack-orchestrator',
|
name="laconic-stack-orchestrator",
|
||||||
version=version,
|
version=version,
|
||||||
author='Cerc',
|
author="Cerc",
|
||||||
author_email='info@cerc.io',
|
author_email="info@cerc.io",
|
||||||
license='GNU Affero General Public License',
|
license="GNU Affero General Public License",
|
||||||
description='Orchestrates deployment of the Laconic stack',
|
description="Orchestrates deployment of the Laconic stack",
|
||||||
long_description=long_description,
|
long_description=long_description,
|
||||||
long_description_content_type="text/markdown",
|
long_description_content_type="text/markdown",
|
||||||
url='https://git.vdb.to/cerc-io/stack-orchestrator',
|
url="https://git.vdb.to/cerc-io/stack-orchestrator",
|
||||||
py_modules=['stack_orchestrator'],
|
py_modules=["stack_orchestrator"],
|
||||||
packages=find_packages(),
|
packages=find_packages(),
|
||||||
install_requires=[requirements],
|
install_requires=[requirements],
|
||||||
python_requires='>=3.7',
|
python_requires=">=3.7",
|
||||||
include_package_data=True,
|
include_package_data=True,
|
||||||
package_data={'': ['data/**']},
|
package_data={"": ["data/**"]},
|
||||||
classifiers=[
|
classifiers=[
|
||||||
"Programming Language :: Python :: 3.8",
|
"Programming Language :: Python :: 3.8",
|
||||||
"Operating System :: OS Independent",
|
"Operating System :: OS Independent",
|
||||||
],
|
],
|
||||||
entry_points={
|
entry_points={
|
||||||
'console_scripts': ['laconic-so=stack_orchestrator.main:cli'],
|
"console_scripts": ["laconic-so=stack_orchestrator.main:cli"],
|
||||||
}
|
},
|
||||||
)
|
)
|
||||||
|
|
|
||||||
|
|
@ -27,7 +27,6 @@ def get_stack(config, stack):
|
||||||
|
|
||||||
|
|
||||||
class base_stack(ABC):
|
class base_stack(ABC):
|
||||||
|
|
||||||
def __init__(self, config, stack):
|
def __init__(self, config, stack):
|
||||||
self.config = config
|
self.config = config
|
||||||
self.stack = stack
|
self.stack = stack
|
||||||
|
|
@ -42,14 +41,16 @@ class base_stack(ABC):
|
||||||
|
|
||||||
|
|
||||||
class package_registry_stack(base_stack):
|
class package_registry_stack(base_stack):
|
||||||
|
|
||||||
def ensure_available(self):
|
def ensure_available(self):
|
||||||
self.url = "<no registry url set>"
|
self.url = "<no registry url set>"
|
||||||
# Check if we were given an external registry URL
|
# Check if we were given an external registry URL
|
||||||
url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
|
url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
|
||||||
if url_from_environment:
|
if url_from_environment:
|
||||||
if self.config.verbose:
|
if self.config.verbose:
|
||||||
print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
|
print(
|
||||||
|
f"Using package registry url from CERC_NPM_REGISTRY_URL: "
|
||||||
|
f"{url_from_environment}"
|
||||||
|
)
|
||||||
self.url = url_from_environment
|
self.url = url_from_environment
|
||||||
else:
|
else:
|
||||||
# Otherwise we expect to use the local package-registry stack
|
# Otherwise we expect to use the local package-registry stack
|
||||||
|
|
@ -62,10 +63,16 @@ class package_registry_stack(base_stack):
|
||||||
# TODO: get url from deploy-stack
|
# TODO: get url from deploy-stack
|
||||||
self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
|
self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
|
||||||
else:
|
else:
|
||||||
# If not, print a message about how to start it and return fail to the caller
|
# If not, print a message about how to start it and return fail to the
|
||||||
print("ERROR: The package-registry stack is not running, and no external registry "
|
# caller
|
||||||
"specified with CERC_NPM_REGISTRY_URL")
|
print(
|
||||||
print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
|
"ERROR: The package-registry stack is not running, "
|
||||||
|
"and no external registry specified with CERC_NPM_REGISTRY_URL"
|
||||||
|
)
|
||||||
|
print(
|
||||||
|
"ERROR: Start the local package registry with: "
|
||||||
|
"laconic-so --stack package-registry deploy-system up"
|
||||||
|
)
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
@ -76,7 +83,9 @@ class package_registry_stack(base_stack):
|
||||||
def get_npm_registry_url():
|
def get_npm_registry_url():
|
||||||
# If an auth token is not defined, we assume the default should be the cerc registry
|
# If an auth token is not defined, we assume the default should be the cerc registry
|
||||||
# If an auth token is defined, we assume the local gitea should be used.
|
# If an auth token is defined, we assume the local gitea should be used.
|
||||||
default_npm_registry_url = "http://gitea.local:3000/api/packages/cerc-io/npm/" if config(
|
default_npm_registry_url = (
|
||||||
"CERC_NPM_AUTH_TOKEN", default=None
|
"http://gitea.local:3000/api/packages/cerc-io/npm/"
|
||||||
) else "https://git.vdb.to/api/packages/cerc-io/npm/"
|
if config("CERC_NPM_AUTH_TOKEN", default=None)
|
||||||
|
else "https://git.vdb.to/api/packages/cerc-io/npm/"
|
||||||
|
)
|
||||||
return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
|
return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
|
||||||
|
|
|
||||||
|
|
@ -18,7 +18,8 @@
|
||||||
# env vars:
|
# env vars:
|
||||||
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
||||||
|
|
||||||
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
# TODO: display the available list of containers;
|
||||||
|
# allow re-build of either all or specific containers
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
|
@ -34,14 +35,17 @@ from stack_orchestrator.build.publish import publish_image
|
||||||
from stack_orchestrator.build.build_util import get_containers_in_scope
|
from stack_orchestrator.build.build_util import get_containers_in_scope
|
||||||
|
|
||||||
# TODO: find a place for this
|
# TODO: find a place for this
|
||||||
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
# epilog="Config provided either in .env or settings.ini or env vars:
|
||||||
|
# CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
||||||
|
|
||||||
|
|
||||||
def make_container_build_env(dev_root_path: str,
|
def make_container_build_env(
|
||||||
|
dev_root_path: str,
|
||||||
container_build_dir: str,
|
container_build_dir: str,
|
||||||
debug: bool,
|
debug: bool,
|
||||||
force_rebuild: bool,
|
force_rebuild: bool,
|
||||||
extra_build_args: str):
|
extra_build_args: str,
|
||||||
|
):
|
||||||
container_build_env = {
|
container_build_env = {
|
||||||
"CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
|
"CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
|
||||||
"CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
|
"CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
|
||||||
|
|
@ -50,11 +54,15 @@ def make_container_build_env(dev_root_path: str,
|
||||||
"CERC_CONTAINER_BASE_DIR": container_build_dir,
|
"CERC_CONTAINER_BASE_DIR": container_build_dir,
|
||||||
"CERC_HOST_UID": f"{os.getuid()}",
|
"CERC_HOST_UID": f"{os.getuid()}",
|
||||||
"CERC_HOST_GID": f"{os.getgid()}",
|
"CERC_HOST_GID": f"{os.getgid()}",
|
||||||
"DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
|
"DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0"),
|
||||||
}
|
}
|
||||||
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
||||||
container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
|
container_build_env.update(
|
||||||
|
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
|
||||||
|
if extra_build_args
|
||||||
|
else {}
|
||||||
|
)
|
||||||
docker_host_env = os.getenv("DOCKER_HOST")
|
docker_host_env = os.getenv("DOCKER_HOST")
|
||||||
if docker_host_env:
|
if docker_host_env:
|
||||||
container_build_env.update({"DOCKER_HOST": docker_host_env})
|
container_build_env.update({"DOCKER_HOST": docker_host_env})
|
||||||
|
|
@ -67,12 +75,18 @@ def process_container(build_context: BuildContext) -> bool:
|
||||||
print(f"Building: {build_context.container}")
|
print(f"Building: {build_context.container}")
|
||||||
|
|
||||||
default_container_tag = f"{build_context.container}:local"
|
default_container_tag = f"{build_context.container}:local"
|
||||||
build_context.container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag})
|
build_context.container_build_env.update(
|
||||||
|
{"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag}
|
||||||
|
)
|
||||||
|
|
||||||
# Check if this is in an external stack
|
# Check if this is in an external stack
|
||||||
if stack_is_external(build_context.stack):
|
if stack_is_external(build_context.stack):
|
||||||
container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build")
|
container_parent_dir = Path(build_context.stack).parent.parent.joinpath(
|
||||||
temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
|
"container-build"
|
||||||
|
)
|
||||||
|
temp_build_dir = container_parent_dir.joinpath(
|
||||||
|
build_context.container.replace("/", "-")
|
||||||
|
)
|
||||||
temp_build_script_filename = temp_build_dir.joinpath("build.sh")
|
temp_build_script_filename = temp_build_dir.joinpath("build.sh")
|
||||||
# Now check if the container exists in the external stack.
|
# Now check if the container exists in the external stack.
|
||||||
if not temp_build_script_filename.exists():
|
if not temp_build_script_filename.exists():
|
||||||
|
|
@ -90,21 +104,34 @@ def process_container(build_context: BuildContext) -> bool:
|
||||||
build_command = build_script_filename.as_posix()
|
build_command = build_script_filename.as_posix()
|
||||||
else:
|
else:
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"No script file found: {build_script_filename}, using default build script")
|
print(
|
||||||
repo_dir = build_context.container.split('/')[1]
|
f"No script file found: {build_script_filename}, "
|
||||||
# TODO: make this less of a hack -- should be specified in some metadata somewhere
|
"using default build script"
|
||||||
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
|
)
|
||||||
|
repo_dir = build_context.container.split("/")[1]
|
||||||
|
# TODO: make this less of a hack -- should be specified in
|
||||||
|
# some metadata somewhere. Check if we have a repo for this
|
||||||
|
# container. If not, set the context dir to container-build subdir
|
||||||
repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
|
repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
|
||||||
repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
|
repo_dir_or_build_dir = (
|
||||||
build_command = os.path.join(build_context.container_build_dir,
|
repo_full_path if os.path.exists(repo_full_path) else build_dir
|
||||||
"default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}"
|
)
|
||||||
|
build_command = (
|
||||||
|
os.path.join(build_context.container_build_dir, "default-build.sh")
|
||||||
|
+ f" {default_container_tag} {repo_dir_or_build_dir}"
|
||||||
|
)
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
# No PATH at all causes failures with podman.
|
# No PATH at all causes failures with podman.
|
||||||
if "PATH" not in build_context.container_build_env:
|
if "PATH" not in build_context.container_build_env:
|
||||||
build_context.container_build_env["PATH"] = os.environ["PATH"]
|
build_context.container_build_env["PATH"] = os.environ["PATH"]
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Executing: {build_command} with environment: {build_context.container_build_env}")
|
print(
|
||||||
build_result = subprocess.run(build_command, shell=True, env=build_context.container_build_env)
|
f"Executing: {build_command} with environment: "
|
||||||
|
f"{build_context.container_build_env}"
|
||||||
|
)
|
||||||
|
build_result = subprocess.run(
|
||||||
|
build_command, shell=True, env=build_context.container_build_env
|
||||||
|
)
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Return code is: {build_result.returncode}")
|
print(f"Return code is: {build_result.returncode}")
|
||||||
if build_result.returncode != 0:
|
if build_result.returncode != 0:
|
||||||
|
|
@ -117,33 +144,61 @@ def process_container(build_context: BuildContext) -> bool:
|
||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--include', help="only build these containers")
|
@click.option("--include", help="only build these containers")
|
||||||
@click.option('--exclude', help="don\'t build these containers")
|
@click.option("--exclude", help="don't build these containers")
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
@click.option(
|
||||||
|
"--force-rebuild",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Override dependency checking -- always rebuild",
|
||||||
|
)
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option("--publish-images", is_flag=True, default=False, help="Publish the built images in the specified image registry")
|
@click.option(
|
||||||
@click.option("--image-registry", help="Specify the image registry for --publish-images")
|
"--publish-images",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Publish the built images in the specified image registry",
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--image-registry", help="Specify the image registry for --publish-images"
|
||||||
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_images, image_registry):
|
def command(
|
||||||
'''build the set of containers required for a complete stack'''
|
ctx,
|
||||||
|
include,
|
||||||
|
exclude,
|
||||||
|
force_rebuild,
|
||||||
|
extra_build_args,
|
||||||
|
publish_images,
|
||||||
|
image_registry,
|
||||||
|
):
|
||||||
|
"""build the set of containers required for a complete stack"""
|
||||||
|
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
# See: https://stackoverflow.com/questions/25389095/
|
||||||
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
# python-get-path-of-root-project-structure
|
||||||
|
container_build_dir = (
|
||||||
|
Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
||||||
|
)
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
print(
|
||||||
|
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
||||||
|
f"{dev_root_path}"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(
|
||||||
|
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
||||||
|
)
|
||||||
|
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f'Dev Root is: {dev_root_path}')
|
print(f"Dev Root is: {dev_root_path}")
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print('Dev root directory doesn\'t exist, creating')
|
print("Dev root directory doesn't exist, creating")
|
||||||
|
|
||||||
if publish_images:
|
if publish_images:
|
||||||
if not image_registry:
|
if not image_registry:
|
||||||
|
|
@ -151,21 +206,22 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_imag
|
||||||
|
|
||||||
containers_in_scope = get_containers_in_scope(stack)
|
containers_in_scope = get_containers_in_scope(stack)
|
||||||
|
|
||||||
container_build_env = make_container_build_env(dev_root_path,
|
container_build_env = make_container_build_env(
|
||||||
|
dev_root_path,
|
||||||
container_build_dir,
|
container_build_dir,
|
||||||
opts.o.debug,
|
opts.o.debug,
|
||||||
force_rebuild,
|
force_rebuild,
|
||||||
extra_build_args)
|
extra_build_args,
|
||||||
|
)
|
||||||
|
|
||||||
for container in containers_in_scope:
|
for container in containers_in_scope:
|
||||||
if include_exclude_check(container, include, exclude):
|
if include_exclude_check(container, include, exclude):
|
||||||
|
|
||||||
build_context = BuildContext(
|
build_context = BuildContext(
|
||||||
stack,
|
stack,
|
||||||
container,
|
container,
|
||||||
container_build_dir,
|
container_build_dir,
|
||||||
container_build_env,
|
container_build_env,
|
||||||
dev_root_path
|
dev_root_path,
|
||||||
)
|
)
|
||||||
result = process_container(build_context)
|
result = process_container(build_context)
|
||||||
if result:
|
if result:
|
||||||
|
|
@ -174,10 +230,16 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_imag
|
||||||
else:
|
else:
|
||||||
print(f"Error running build for {build_context.container}")
|
print(f"Error running build for {build_context.container}")
|
||||||
if not opts.o.continue_on_error:
|
if not opts.o.continue_on_error:
|
||||||
error_exit("container build failed and --continue-on-error not set, exiting")
|
error_exit(
|
||||||
|
"container build failed and --continue-on-error "
|
||||||
|
"not set, exiting"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
print("****** Container Build Error, continuing because --continue-on-error is set")
|
print(
|
||||||
|
"****** Container Build Error, continuing because "
|
||||||
|
"--continue-on-error is set"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Excluding: {container}")
|
print(f"Excluding: {container}")
|
||||||
|
|
|
||||||
|
|
@ -32,14 +32,18 @@ builder_js_image_name = "cerc/builder-js:local"
|
||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--include', help="only build these packages")
|
@click.option("--include", help="only build these packages")
|
||||||
@click.option('--exclude', help="don\'t build these packages")
|
@click.option("--exclude", help="don't build these packages")
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False,
|
@click.option(
|
||||||
help="Override existing target package version check -- force rebuild")
|
"--force-rebuild",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Override existing target package version check -- force rebuild",
|
||||||
|
)
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
||||||
'''build the set of npm packages required for a complete stack'''
|
"""build the set of npm packages required for a complete stack"""
|
||||||
|
|
||||||
quiet = ctx.obj.quiet
|
quiet = ctx.obj.quiet
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
|
|
@ -65,45 +69,54 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
print(
|
||||||
|
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
||||||
|
f"{dev_root_path}"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(
|
||||||
|
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
||||||
|
)
|
||||||
|
|
||||||
build_root_path = os.path.join(dev_root_path, "build-trees")
|
build_root_path = os.path.join(dev_root_path, "build-trees")
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f'Dev Root is: {dev_root_path}')
|
print(f"Dev Root is: {dev_root_path}")
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print('Dev root directory doesn\'t exist, creating')
|
print("Dev root directory doesn't exist, creating")
|
||||||
os.makedirs(dev_root_path)
|
os.makedirs(dev_root_path)
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print('Build root directory doesn\'t exist, creating')
|
print("Build root directory doesn't exist, creating")
|
||||||
os.makedirs(build_root_path)
|
os.makedirs(build_root_path)
|
||||||
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
|
|
||||||
|
with importlib.resources.open_text(
|
||||||
|
data, "npm-package-list.txt"
|
||||||
|
) as package_list_file:
|
||||||
all_packages = package_list_file.read().splitlines()
|
all_packages = package_list_file.read().splitlines()
|
||||||
|
|
||||||
packages_in_scope = []
|
packages_in_scope = []
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
# TODO: syntax check the input here
|
# TODO: syntax check the input here
|
||||||
packages_in_scope = stack_config['npms']
|
packages_in_scope = stack_config["npms"]
|
||||||
else:
|
else:
|
||||||
packages_in_scope = all_packages
|
packages_in_scope = all_packages
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f'Packages: {packages_in_scope}')
|
print(f"Packages: {packages_in_scope}")
|
||||||
|
|
||||||
def build_package(package):
|
def build_package(package):
|
||||||
if not quiet:
|
if not quiet:
|
||||||
print(f"Building npm package: {package}")
|
print(f"Building npm package: {package}")
|
||||||
repo_dir = package
|
repo_dir = package
|
||||||
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
||||||
# Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo
|
# Copy the repo and build that to avoid propagating
|
||||||
|
# JS tooling file changes back into the cloned repo
|
||||||
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
||||||
# First delete any old build tree
|
# First delete any old build tree
|
||||||
if os.path.isdir(repo_copy_path):
|
if os.path.isdir(repo_copy_path):
|
||||||
|
|
@ -116,41 +129,63 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
||||||
print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
|
print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
|
||||||
if not dry_run:
|
if not dry_run:
|
||||||
copytree(repo_full_path, repo_copy_path)
|
copytree(repo_full_path, repo_copy_path)
|
||||||
build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"]
|
build_command = [
|
||||||
|
"sh",
|
||||||
|
"-c",
|
||||||
|
"cd /workspace && "
|
||||||
|
f"build-npm-package-local-dependencies.sh {npm_registry_url}",
|
||||||
|
]
|
||||||
if not dry_run:
|
if not dry_run:
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Executing: {build_command}")
|
print(f"Executing: {build_command}")
|
||||||
# Originally we used the PEP 584 merge operator:
|
# Originally we used the PEP 584 merge operator:
|
||||||
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} |
|
||||||
# but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update:
|
# ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
|
# but that isn't available in Python 3.8 (default in Ubuntu 20)
|
||||||
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml" # Convention used by our web app packages
|
# so for now we use dict.update:
|
||||||
|
envs = {
|
||||||
|
"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
|
||||||
|
# Convention used by our web app packages
|
||||||
|
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml",
|
||||||
}
|
}
|
||||||
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
||||||
envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
|
envs.update(
|
||||||
|
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
|
||||||
|
if extra_build_args
|
||||||
|
else {}
|
||||||
|
)
|
||||||
try:
|
try:
|
||||||
docker.run(builder_js_image_name,
|
docker.run(
|
||||||
|
builder_js_image_name,
|
||||||
remove=True,
|
remove=True,
|
||||||
interactive=True,
|
interactive=True,
|
||||||
tty=True,
|
tty=True,
|
||||||
user=f"{os.getuid()}:{os.getgid()}",
|
user=f"{os.getuid()}:{os.getgid()}",
|
||||||
envs=envs,
|
envs=envs,
|
||||||
# TODO: detect this host name in npm_registry_url rather than hard-wiring it
|
# TODO: detect this host name in npm_registry_url
|
||||||
|
# rather than hard-wiring it
|
||||||
add_hosts=[("gitea.local", "host-gateway")],
|
add_hosts=[("gitea.local", "host-gateway")],
|
||||||
volumes=[(repo_copy_path, "/workspace")],
|
volumes=[(repo_copy_path, "/workspace")],
|
||||||
command=build_command
|
command=build_command,
|
||||||
)
|
)
|
||||||
# Note that although the docs say that build_result should contain
|
# Note that although the docs say that build_result should
|
||||||
# the command output as a string, in reality it is always the empty string.
|
# contain the command output as a string, in reality it is
|
||||||
# Since we detect errors via catching exceptions below, we can safely ignore it here.
|
# always the empty string. Since we detect errors via catching
|
||||||
|
# exceptions below, we can safely ignore it here.
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
print(f"Error executing build for {package} in container:\n {e}")
|
print(f"Error executing build for {package} in container:\n {e}")
|
||||||
if not continue_on_error:
|
if not continue_on_error:
|
||||||
print("FATAL Error: build failed and --continue-on-error not set, exiting")
|
print(
|
||||||
|
"FATAL Error: build failed and --continue-on-error "
|
||||||
|
"not set, exiting"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
print("****** Build Error, continuing because --continue-on-error is set")
|
print(
|
||||||
|
"****** Build Error, continuing because "
|
||||||
|
"--continue-on-error is set"
|
||||||
|
)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
print("Skipped")
|
print("Skipped")
|
||||||
|
|
@ -168,6 +203,12 @@ def _ensure_prerequisites():
|
||||||
# Tell the user how to build it if not
|
# Tell the user how to build it if not
|
||||||
images = docker.image.list(builder_js_image_name)
|
images = docker.image.list(builder_js_image_name)
|
||||||
if len(images) == 0:
|
if len(images) == 0:
|
||||||
print(f"FATAL: builder image: {builder_js_image_name} is required but was not found")
|
print(
|
||||||
print("Please run this command to create it: laconic-so --stack build-support build-containers")
|
f"FATAL: builder image: {builder_js_image_name} is required "
|
||||||
|
"but was not found"
|
||||||
|
)
|
||||||
|
print(
|
||||||
|
"Please run this command to create it: "
|
||||||
|
"laconic-so --stack build-support build-containers"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
|
||||||
|
|
@ -24,5 +24,5 @@ class BuildContext:
|
||||||
stack: str
|
stack: str
|
||||||
container: str
|
container: str
|
||||||
container_build_dir: Path
|
container_build_dir: Path
|
||||||
container_build_env: Mapping[str,str]
|
container_build_env: Mapping[str, str]
|
||||||
dev_root_path: str
|
dev_root_path: str
|
||||||
|
|
|
||||||
|
|
@ -20,21 +20,23 @@ from stack_orchestrator.util import get_parsed_stack_config, warn_exit
|
||||||
|
|
||||||
|
|
||||||
def get_containers_in_scope(stack: str):
|
def get_containers_in_scope(stack: str):
|
||||||
|
|
||||||
containers_in_scope = []
|
containers_in_scope = []
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
if "containers" not in stack_config or stack_config["containers"] is None:
|
if "containers" not in stack_config or stack_config["containers"] is None:
|
||||||
warn_exit(f"stack {stack} does not define any containers")
|
warn_exit(f"stack {stack} does not define any containers")
|
||||||
containers_in_scope = stack_config['containers']
|
containers_in_scope = stack_config["containers"]
|
||||||
else:
|
else:
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
|
|
||||||
|
with importlib.resources.open_text(
|
||||||
|
data, "container-image-list.txt"
|
||||||
|
) as container_list_file:
|
||||||
containers_in_scope = container_list_file.read().splitlines()
|
containers_in_scope = container_list_file.read().splitlines()
|
||||||
|
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f'Containers: {containers_in_scope}')
|
print(f"Containers: {containers_in_scope}")
|
||||||
if stack:
|
if stack:
|
||||||
print(f"Stack: {stack}")
|
print(f"Stack: {stack}")
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -18,7 +18,8 @@
|
||||||
# env vars:
|
# env vars:
|
||||||
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
||||||
|
|
||||||
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
# TODO: display the available list of containers;
|
||||||
|
# allow re-build of either all or specific containers
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
|
@ -32,40 +33,55 @@ from stack_orchestrator.build.build_types import BuildContext
|
||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--base-container')
|
@click.option("--base-container")
|
||||||
@click.option('--source-repo', help="directory containing the webapp to build", required=True)
|
@click.option(
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
"--source-repo", help="directory containing the webapp to build", required=True
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--force-rebuild",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Override dependency checking -- always rebuild",
|
||||||
|
)
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
|
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
|
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
|
||||||
'''build the specified webapp container'''
|
"""build the specified webapp container"""
|
||||||
logger = TimedLogger()
|
logger = TimedLogger()
|
||||||
|
|
||||||
quiet = ctx.obj.quiet
|
|
||||||
debug = ctx.obj.debug
|
debug = ctx.obj.debug
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
# See: https://stackoverflow.com/questions/25389095/
|
||||||
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
# python-get-path-of-root-project-structure
|
||||||
|
container_build_dir = (
|
||||||
|
Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
||||||
|
)
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
||||||
logger.log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
logger.log(
|
||||||
|
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
||||||
|
f"{dev_root_path}"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(
|
||||||
|
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
||||||
|
)
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f'Dev Root is: {dev_root_path}')
|
logger.log(f"Dev Root is: {dev_root_path}")
|
||||||
|
|
||||||
if not base_container:
|
if not base_container:
|
||||||
base_container = determine_base_container(source_repo)
|
base_container = determine_base_container(source_repo)
|
||||||
|
|
||||||
# First build the base container.
|
# First build the base container.
|
||||||
container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
|
container_build_env = build_containers.make_container_build_env(
|
||||||
force_rebuild, extra_build_args)
|
dev_root_path, container_build_dir, debug, force_rebuild, extra_build_args
|
||||||
|
)
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f"Building base container: {base_container}")
|
logger.log(f"Building base container: {base_container}")
|
||||||
|
|
@ -85,12 +101,13 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
|
||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f"Base container {base_container} build finished.")
|
logger.log(f"Base container {base_container} build finished.")
|
||||||
|
|
||||||
# Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
|
# Now build the target webapp. We use the same build script,
|
||||||
|
# but with a different Dockerfile and work dir.
|
||||||
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
|
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
|
||||||
container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
|
container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
|
||||||
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir,
|
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(
|
||||||
base_container.replace("/", "-"),
|
container_build_dir, base_container.replace("/", "-"), "Dockerfile.webapp"
|
||||||
"Dockerfile.webapp")
|
)
|
||||||
if not tag:
|
if not tag:
|
||||||
webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
|
webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
|
||||||
tag = f"cerc/{webapp_name}:local"
|
tag = f"cerc/{webapp_name}:local"
|
||||||
|
|
|
||||||
|
|
@ -52,7 +52,8 @@ def _local_tag_for(container: str):
|
||||||
|
|
||||||
# See: https://docker-docs.uclv.cu/registry/spec/api/
|
# See: https://docker-docs.uclv.cu/registry/spec/api/
|
||||||
# Emulate this:
|
# Emulate this:
|
||||||
# $ curl -u "my-username:my-token" -X GET "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
|
# $ curl -u "my-username:my-token" -X GET \
|
||||||
|
# "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
|
||||||
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
|
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
|
||||||
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
|
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
|
||||||
# registry looks like: git.vdb.to/cerc-io
|
# registry looks like: git.vdb.to/cerc-io
|
||||||
|
|
@ -60,7 +61,9 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
|
||||||
url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
|
url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Fetching tags from: {url}")
|
print(f"Fetching tags from: {url}")
|
||||||
response = requests.get(url, auth=(registry_info.registry_username, registry_info.registry_token))
|
response = requests.get(
|
||||||
|
url, auth=(registry_info.registry_username, registry_info.registry_token)
|
||||||
|
)
|
||||||
if response.status_code == 200:
|
if response.status_code == 200:
|
||||||
tag_info = response.json()
|
tag_info = response.json()
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
|
|
@ -68,7 +71,10 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
|
||||||
tags_array = tag_info["tags"]
|
tags_array = tag_info["tags"]
|
||||||
return tags_array
|
return tags_array
|
||||||
else:
|
else:
|
||||||
error_exit(f"failed to fetch tags from image registry, status code: {response.status_code}")
|
error_exit(
|
||||||
|
f"failed to fetch tags from image registry, "
|
||||||
|
f"status code: {response.status_code}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def _find_latest(candidate_tags: List[str]):
|
def _find_latest(candidate_tags: List[str]):
|
||||||
|
|
@ -79,9 +85,9 @@ def _find_latest(candidate_tags: List[str]):
|
||||||
return sorted_candidates[-1]
|
return sorted_candidates[-1]
|
||||||
|
|
||||||
|
|
||||||
def _filter_for_platform(container: str,
|
def _filter_for_platform(
|
||||||
registry_info: RegistryInfo,
|
container: str, registry_info: RegistryInfo, tag_list: List[str]
|
||||||
tag_list: List[str]) -> List[str] :
|
) -> List[str]:
|
||||||
filtered_tags = []
|
filtered_tags = []
|
||||||
this_machine = platform.machine()
|
this_machine = platform.machine()
|
||||||
# Translate between Python and docker platform names
|
# Translate between Python and docker platform names
|
||||||
|
|
@ -137,21 +143,44 @@ def _add_local_tag(remote_tag: str, registry: str, local_tag: str):
|
||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--include', help="only fetch these containers")
|
@click.option("--include", help="only fetch these containers")
|
||||||
@click.option('--exclude', help="don\'t fetch these containers")
|
@click.option("--exclude", help="don't fetch these containers")
|
||||||
@click.option("--force-local-overwrite", is_flag=True, default=False, help="Overwrite a locally built image, if present")
|
@click.option(
|
||||||
@click.option("--image-registry", required=True, help="Specify the image registry to fetch from")
|
"--force-local-overwrite",
|
||||||
@click.option("--registry-username", required=True, help="Specify the image registry username")
|
is_flag=True,
|
||||||
@click.option("--registry-token", required=True, help="Specify the image registry access token")
|
default=False,
|
||||||
|
help="Overwrite a locally built image, if present",
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--image-registry", required=True, help="Specify the image registry to fetch from"
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--registry-username", required=True, help="Specify the image registry username"
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--registry-token", required=True, help="Specify the image registry access token"
|
||||||
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_local_overwrite, image_registry, registry_username, registry_token):
|
def command(
|
||||||
'''EXPERIMENTAL: fetch the images for a stack from remote registry'''
|
ctx,
|
||||||
|
include,
|
||||||
|
exclude,
|
||||||
|
force_local_overwrite,
|
||||||
|
image_registry,
|
||||||
|
registry_username,
|
||||||
|
registry_token,
|
||||||
|
):
|
||||||
|
"""EXPERIMENTAL: fetch the images for a stack from remote registry"""
|
||||||
|
|
||||||
registry_info = RegistryInfo(image_registry, registry_username, registry_token)
|
registry_info = RegistryInfo(image_registry, registry_username, registry_token)
|
||||||
docker = DockerClient()
|
docker = DockerClient()
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print("Logging into container registry:")
|
print("Logging into container registry:")
|
||||||
docker.login(registry_info.registry, registry_info.registry_username, registry_info.registry_token)
|
docker.login(
|
||||||
|
registry_info.registry,
|
||||||
|
registry_info.registry_username,
|
||||||
|
registry_info.registry_token,
|
||||||
|
)
|
||||||
# Generate list of target containers
|
# Generate list of target containers
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
containers_in_scope = get_containers_in_scope(stack)
|
containers_in_scope = get_containers_in_scope(stack)
|
||||||
|
|
@ -172,19 +201,24 @@ def command(ctx, include, exclude, force_local_overwrite, image_registry, regist
|
||||||
print(f"Fetching: {image_to_fetch}")
|
print(f"Fetching: {image_to_fetch}")
|
||||||
_fetch_image(image_to_fetch, registry_info)
|
_fetch_image(image_to_fetch, registry_info)
|
||||||
# Now check if the target container already exists exists locally already
|
# Now check if the target container already exists exists locally already
|
||||||
if (_exists_locally(container)):
|
if _exists_locally(container):
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f"Container image {container} already exists locally")
|
print(f"Container image {container} already exists locally")
|
||||||
# if so, fail unless the user specified force-local-overwrite
|
# if so, fail unless the user specified force-local-overwrite
|
||||||
if (force_local_overwrite):
|
if force_local_overwrite:
|
||||||
# In that case remove the existing :local tag
|
# In that case remove the existing :local tag
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f"Warning: overwriting local tag from this image: {container} because "
|
print(
|
||||||
"--force-local-overwrite was specified")
|
f"Warning: overwriting local tag from this image: "
|
||||||
|
f"{container} because --force-local-overwrite was specified"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f"Skipping local tagging for this image: {container} because that would "
|
print(
|
||||||
"overwrite an existing :local tagged image, use --force-local-overwrite to do so.")
|
f"Skipping local tagging for this image: {container} "
|
||||||
|
"because that would overwrite an existing :local tagged "
|
||||||
|
"image, use --force-local-overwrite to do so."
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
# Tag the fetched image with the :local tag
|
# Tag the fetched image with the :local tag
|
||||||
_add_local_tag(image_to_fetch, image_registry, local_tag)
|
_add_local_tag(image_to_fetch, image_registry, local_tag)
|
||||||
|
|
@ -192,4 +226,7 @@ def command(ctx, include, exclude, force_local_overwrite, image_registry, regist
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Excluding: {container}")
|
print(f"Excluding: {container}")
|
||||||
if not all_containers_found:
|
if not all_containers_found:
|
||||||
print("Warning: couldn't find usable images for one or more containers, this stack will not deploy")
|
print(
|
||||||
|
"Warning: couldn't find usable images for one or more containers, "
|
||||||
|
"this stack will not deploy"
|
||||||
|
)
|
||||||
|
|
|
||||||
|
|
@ -12,7 +12,10 @@ from fabric import Connection
|
||||||
|
|
||||||
|
|
||||||
def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_name):
|
def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_name):
|
||||||
command = f"pg_dump -h {db_host} -p {db_port} -U {db_user} -d {db_name} -c --inserts -f {file_name}"
|
command = (
|
||||||
|
f"pg_dump -h {db_host} -p {db_port} -U {db_user} "
|
||||||
|
f"-d {db_name} -c --inserts -f {file_name}"
|
||||||
|
)
|
||||||
my_env = os.environ.copy()
|
my_env = os.environ.copy()
|
||||||
my_env["PGPASSWORD"] = db_password
|
my_env["PGPASSWORD"] = db_password
|
||||||
print(f"Exporting from {db_host}:{db_port}/{db_name} to {file_name}... ", end="")
|
print(f"Exporting from {db_host}:{db_port}/{db_name} to {file_name}... ", end="")
|
||||||
|
|
|
||||||
|
|
@ -11,6 +11,8 @@ if len(sys.argv) > 1:
|
||||||
with open(testnet_config_path) as stream:
|
with open(testnet_config_path) as stream:
|
||||||
data = yaml.safe_load(stream)
|
data = yaml.safe_load(stream)
|
||||||
|
|
||||||
for key, value in data['el_premine'].items():
|
for key, value in data["el_premine"].items():
|
||||||
acct = w3.eth.account.from_mnemonic(data['mnemonic'], account_path=key, passphrase='')
|
acct = w3.eth.account.from_mnemonic(
|
||||||
|
data["mnemonic"], account_path=key, passphrase=""
|
||||||
|
)
|
||||||
print("%s,%s,%s" % (key, acct.address, acct.key.hex()))
|
print("%s,%s,%s" % (key, acct.address, acct.key.hex()))
|
||||||
|
|
|
||||||
|
|
@ -18,21 +18,26 @@ from ruamel.yaml import YAML
|
||||||
|
|
||||||
|
|
||||||
def create(context: DeploymentContext, extra_args):
|
def create(context: DeploymentContext, extra_args):
|
||||||
# Slightly modify the base fixturenet-eth compose file to replace the startup script for fixturenet-eth-geth-1
|
# Slightly modify the base fixturenet-eth compose file to replace the
|
||||||
# We need to start geth with the flag to allow non eip-155 compliant transactions in order to publish the
|
# startup script for fixturenet-eth-geth-1
|
||||||
# deterministic-deployment-proxy contract, which itself is a prereq for Optimism contract deployment
|
# We need to start geth with the flag to allow non eip-155 compliant
|
||||||
fixturenet_eth_compose_file = context.deployment_dir.joinpath('compose', 'docker-compose-fixturenet-eth.yml')
|
# transactions in order to publish the
|
||||||
|
# deterministic-deployment-proxy contract, which itself is a prereq for
|
||||||
|
# Optimism contract deployment
|
||||||
|
fixturenet_eth_compose_file = context.deployment_dir.joinpath(
|
||||||
|
"compose", "docker-compose-fixturenet-eth.yml"
|
||||||
|
)
|
||||||
|
|
||||||
with open(fixturenet_eth_compose_file, 'r') as yaml_file:
|
with open(fixturenet_eth_compose_file, "r") as yaml_file:
|
||||||
yaml = YAML()
|
yaml = YAML()
|
||||||
yaml_data = yaml.load(yaml_file)
|
yaml_data = yaml.load(yaml_file)
|
||||||
|
|
||||||
new_script = '../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh'
|
new_script = "../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh"
|
||||||
|
|
||||||
if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']:
|
if new_script not in yaml_data["services"]["fixturenet-eth-geth-1"]["volumes"]:
|
||||||
yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script)
|
yaml_data["services"]["fixturenet-eth-geth-1"]["volumes"].append(new_script)
|
||||||
|
|
||||||
with open(fixturenet_eth_compose_file, 'w') as yaml_file:
|
with open(fixturenet_eth_compose_file, "w") as yaml_file:
|
||||||
yaml = YAML()
|
yaml = YAML()
|
||||||
yaml.dump(yaml_data, yaml_file)
|
yaml.dump(yaml_data, yaml_file)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -22,18 +22,24 @@ import yaml
|
||||||
def create(context, extra_args):
|
def create(context, extra_args):
|
||||||
# Our goal here is just to copy the json files for blast
|
# Our goal here is just to copy the json files for blast
|
||||||
yml_path = context.deployment_dir.joinpath("spec.yml")
|
yml_path = context.deployment_dir.joinpath("spec.yml")
|
||||||
with open(yml_path, 'r') as file:
|
with open(yml_path, "r") as file:
|
||||||
data = yaml.safe_load(file)
|
data = yaml.safe_load(file)
|
||||||
|
|
||||||
mount_point = data['volumes']['blast-data']
|
mount_point = data["volumes"]["blast-data"]
|
||||||
if mount_point[0] == "/":
|
if mount_point[0] == "/":
|
||||||
deploy_dir = Path(mount_point)
|
deploy_dir = Path(mount_point)
|
||||||
else:
|
else:
|
||||||
deploy_dir = context.deployment_dir.joinpath(mount_point)
|
deploy_dir = context.deployment_dir.joinpath(mount_point)
|
||||||
|
|
||||||
command_context = extra_args[2]
|
command_context = extra_args[2]
|
||||||
compose_file = [f for f in command_context.cluster_context.compose_files if "mainnet-blast" in f][0]
|
compose_file = [
|
||||||
source_config_file = Path(compose_file).parent.parent.joinpath("config", "mainnet-blast", "genesis.json")
|
f for f in command_context.cluster_context.compose_files if "mainnet-blast" in f
|
||||||
|
][0]
|
||||||
|
source_config_file = Path(compose_file).parent.parent.joinpath(
|
||||||
|
"config", "mainnet-blast", "genesis.json"
|
||||||
|
)
|
||||||
copy(source_config_file, deploy_dir)
|
copy(source_config_file, deploy_dir)
|
||||||
source_config_file = Path(compose_file).parent.parent.joinpath("config", "mainnet-blast", "rollup.json")
|
source_config_file = Path(compose_file).parent.parent.joinpath(
|
||||||
|
"config", "mainnet-blast", "rollup.json"
|
||||||
|
)
|
||||||
copy(source_config_file, deploy_dir)
|
copy(source_config_file, deploy_dir)
|
||||||
|
|
|
||||||
|
|
@ -27,6 +27,8 @@ def setup(ctx):
|
||||||
def create(ctx, extra_args):
|
def create(ctx, extra_args):
|
||||||
# Generate the JWT secret and save to its config file
|
# Generate the JWT secret and save to its config file
|
||||||
secret = token_hex(32)
|
secret = token_hex(32)
|
||||||
jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_plugeth_config_data", "jwtsecret")
|
jwt_file_path = ctx.deployment_dir.joinpath(
|
||||||
with open(jwt_file_path, 'w+') as jwt_file:
|
"data", "mainnet_eth_plugeth_config_data", "jwtsecret"
|
||||||
|
)
|
||||||
|
with open(jwt_file_path, "w+") as jwt_file:
|
||||||
jwt_file.write(secret)
|
jwt_file.write(secret)
|
||||||
|
|
|
||||||
|
|
@ -27,6 +27,8 @@ def setup(ctx):
|
||||||
def create(ctx, extra_args):
|
def create(ctx, extra_args):
|
||||||
# Generate the JWT secret and save to its config file
|
# Generate the JWT secret and save to its config file
|
||||||
secret = token_hex(32)
|
secret = token_hex(32)
|
||||||
jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_config_data", "jwtsecret")
|
jwt_file_path = ctx.deployment_dir.joinpath(
|
||||||
with open(jwt_file_path, 'w+') as jwt_file:
|
"data", "mainnet_eth_config_data", "jwtsecret"
|
||||||
|
)
|
||||||
|
with open(jwt_file_path, "w+") as jwt_file:
|
||||||
jwt_file.write(secret)
|
jwt_file.write(secret)
|
||||||
|
|
|
||||||
|
|
@ -14,7 +14,10 @@
|
||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
from stack_orchestrator.util import get_yaml
|
from stack_orchestrator.util import get_yaml
|
||||||
from stack_orchestrator.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand
|
from stack_orchestrator.deploy.deploy_types import (
|
||||||
|
DeployCommandContext,
|
||||||
|
LaconicStackSetupCommand,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
from stack_orchestrator.deploy.stack_state import State
|
from stack_orchestrator.deploy.stack_state import State
|
||||||
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
|
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
|
||||||
|
|
@ -75,7 +78,12 @@ def _copy_gentx_files(network_dir: Path, gentx_file_list: str):
|
||||||
gentx_files = _comma_delimited_to_list(gentx_file_list)
|
gentx_files = _comma_delimited_to_list(gentx_file_list)
|
||||||
for gentx_file in gentx_files:
|
for gentx_file in gentx_files:
|
||||||
gentx_file_path = Path(gentx_file)
|
gentx_file_path = Path(gentx_file)
|
||||||
copyfile(gentx_file_path, os.path.join(network_dir, "config", "gentx", os.path.basename(gentx_file_path)))
|
copyfile(
|
||||||
|
gentx_file_path,
|
||||||
|
os.path.join(
|
||||||
|
network_dir, "config", "gentx", os.path.basename(gentx_file_path)
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def _remove_persistent_peers(network_dir: Path):
|
def _remove_persistent_peers(network_dir: Path):
|
||||||
|
|
@ -86,8 +94,13 @@ def _remove_persistent_peers(network_dir: Path):
|
||||||
with open(config_file_path, "r") as input_file:
|
with open(config_file_path, "r") as input_file:
|
||||||
config_file_content = input_file.read()
|
config_file_content = input_file.read()
|
||||||
persistent_peers_pattern = '^persistent_peers = "(.+?)"'
|
persistent_peers_pattern = '^persistent_peers = "(.+?)"'
|
||||||
replace_with = "persistent_peers = \"\""
|
replace_with = 'persistent_peers = ""'
|
||||||
config_file_content = re.sub(persistent_peers_pattern, replace_with, config_file_content, flags=re.MULTILINE)
|
config_file_content = re.sub(
|
||||||
|
persistent_peers_pattern,
|
||||||
|
replace_with,
|
||||||
|
config_file_content,
|
||||||
|
flags=re.MULTILINE,
|
||||||
|
)
|
||||||
with open(config_file_path, "w") as output_file:
|
with open(config_file_path, "w") as output_file:
|
||||||
output_file.write(config_file_content)
|
output_file.write(config_file_content)
|
||||||
|
|
||||||
|
|
@ -100,8 +113,13 @@ def _insert_persistent_peers(config_dir: Path, new_persistent_peers: str):
|
||||||
with open(config_file_path, "r") as input_file:
|
with open(config_file_path, "r") as input_file:
|
||||||
config_file_content = input_file.read()
|
config_file_content = input_file.read()
|
||||||
persistent_peers_pattern = r'^persistent_peers = ""'
|
persistent_peers_pattern = r'^persistent_peers = ""'
|
||||||
replace_with = f"persistent_peers = \"{new_persistent_peers}\""
|
replace_with = f'persistent_peers = "{new_persistent_peers}"'
|
||||||
config_file_content = re.sub(persistent_peers_pattern, replace_with, config_file_content, flags=re.MULTILINE)
|
config_file_content = re.sub(
|
||||||
|
persistent_peers_pattern,
|
||||||
|
replace_with,
|
||||||
|
config_file_content,
|
||||||
|
flags=re.MULTILINE,
|
||||||
|
)
|
||||||
with open(config_file_path, "w") as output_file:
|
with open(config_file_path, "w") as output_file:
|
||||||
output_file.write(config_file_content)
|
output_file.write(config_file_content)
|
||||||
|
|
||||||
|
|
@ -113,9 +131,11 @@ def _enable_cors(config_dir: Path):
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
with open(config_file_path, "r") as input_file:
|
with open(config_file_path, "r") as input_file:
|
||||||
config_file_content = input_file.read()
|
config_file_content = input_file.read()
|
||||||
cors_pattern = r'^cors_allowed_origins = \[]'
|
cors_pattern = r"^cors_allowed_origins = \[]"
|
||||||
replace_with = 'cors_allowed_origins = ["*"]'
|
replace_with = 'cors_allowed_origins = ["*"]'
|
||||||
config_file_content = re.sub(cors_pattern, replace_with, config_file_content, flags=re.MULTILINE)
|
config_file_content = re.sub(
|
||||||
|
cors_pattern, replace_with, config_file_content, flags=re.MULTILINE
|
||||||
|
)
|
||||||
with open(config_file_path, "w") as output_file:
|
with open(config_file_path, "w") as output_file:
|
||||||
output_file.write(config_file_content)
|
output_file.write(config_file_content)
|
||||||
app_file_path = config_dir.joinpath("app.toml")
|
app_file_path = config_dir.joinpath("app.toml")
|
||||||
|
|
@ -124,9 +144,11 @@ def _enable_cors(config_dir: Path):
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
with open(app_file_path, "r") as input_file:
|
with open(app_file_path, "r") as input_file:
|
||||||
app_file_content = input_file.read()
|
app_file_content = input_file.read()
|
||||||
cors_pattern = r'^enabled-unsafe-cors = false'
|
cors_pattern = r"^enabled-unsafe-cors = false"
|
||||||
replace_with = "enabled-unsafe-cors = true"
|
replace_with = "enabled-unsafe-cors = true"
|
||||||
app_file_content = re.sub(cors_pattern, replace_with, app_file_content, flags=re.MULTILINE)
|
app_file_content = re.sub(
|
||||||
|
cors_pattern, replace_with, app_file_content, flags=re.MULTILINE
|
||||||
|
)
|
||||||
with open(app_file_path, "w") as output_file:
|
with open(app_file_path, "w") as output_file:
|
||||||
output_file.write(app_file_content)
|
output_file.write(app_file_content)
|
||||||
|
|
||||||
|
|
@ -141,7 +163,9 @@ def _set_listen_address(config_dir: Path):
|
||||||
existing_pattern = r'^laddr = "tcp://127.0.0.1:26657"'
|
existing_pattern = r'^laddr = "tcp://127.0.0.1:26657"'
|
||||||
replace_with = 'laddr = "tcp://0.0.0.0:26657"'
|
replace_with = 'laddr = "tcp://0.0.0.0:26657"'
|
||||||
print(f"Replacing in: {config_file_path}")
|
print(f"Replacing in: {config_file_path}")
|
||||||
config_file_content = re.sub(existing_pattern, replace_with, config_file_content, flags=re.MULTILINE)
|
config_file_content = re.sub(
|
||||||
|
existing_pattern, replace_with, config_file_content, flags=re.MULTILINE
|
||||||
|
)
|
||||||
with open(config_file_path, "w") as output_file:
|
with open(config_file_path, "w") as output_file:
|
||||||
output_file.write(config_file_content)
|
output_file.write(config_file_content)
|
||||||
app_file_path = config_dir.joinpath("app.toml")
|
app_file_path = config_dir.joinpath("app.toml")
|
||||||
|
|
@ -152,10 +176,14 @@ def _set_listen_address(config_dir: Path):
|
||||||
app_file_content = input_file.read()
|
app_file_content = input_file.read()
|
||||||
existing_pattern1 = r'^address = "tcp://localhost:1317"'
|
existing_pattern1 = r'^address = "tcp://localhost:1317"'
|
||||||
replace_with1 = 'address = "tcp://0.0.0.0:1317"'
|
replace_with1 = 'address = "tcp://0.0.0.0:1317"'
|
||||||
app_file_content = re.sub(existing_pattern1, replace_with1, app_file_content, flags=re.MULTILINE)
|
app_file_content = re.sub(
|
||||||
|
existing_pattern1, replace_with1, app_file_content, flags=re.MULTILINE
|
||||||
|
)
|
||||||
existing_pattern2 = r'^address = "localhost:9090"'
|
existing_pattern2 = r'^address = "localhost:9090"'
|
||||||
replace_with2 = 'address = "0.0.0.0:9090"'
|
replace_with2 = 'address = "0.0.0.0:9090"'
|
||||||
app_file_content = re.sub(existing_pattern2, replace_with2, app_file_content, flags=re.MULTILINE)
|
app_file_content = re.sub(
|
||||||
|
existing_pattern2, replace_with2, app_file_content, flags=re.MULTILINE
|
||||||
|
)
|
||||||
with open(app_file_path, "w") as output_file:
|
with open(app_file_path, "w") as output_file:
|
||||||
output_file.write(app_file_content)
|
output_file.write(app_file_content)
|
||||||
|
|
||||||
|
|
@ -164,7 +192,10 @@ def _phase_from_params(parameters):
|
||||||
phase = SetupPhase.ILLEGAL
|
phase = SetupPhase.ILLEGAL
|
||||||
if parameters.initialize_network:
|
if parameters.initialize_network:
|
||||||
if parameters.join_network or parameters.create_network:
|
if parameters.join_network or parameters.create_network:
|
||||||
print("Can't supply --join-network or --create-network with --initialize-network")
|
print(
|
||||||
|
"Can't supply --join-network or --create-network "
|
||||||
|
"with --initialize-network"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
if not parameters.chain_id:
|
if not parameters.chain_id:
|
||||||
print("--chain-id is required")
|
print("--chain-id is required")
|
||||||
|
|
@ -176,24 +207,36 @@ def _phase_from_params(parameters):
|
||||||
phase = SetupPhase.INITIALIZE
|
phase = SetupPhase.INITIALIZE
|
||||||
elif parameters.join_network:
|
elif parameters.join_network:
|
||||||
if parameters.initialize_network or parameters.create_network:
|
if parameters.initialize_network or parameters.create_network:
|
||||||
print("Can't supply --initialize-network or --create-network with --join-network")
|
print(
|
||||||
|
"Can't supply --initialize-network or --create-network "
|
||||||
|
"with --join-network"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
phase = SetupPhase.JOIN
|
phase = SetupPhase.JOIN
|
||||||
elif parameters.create_network:
|
elif parameters.create_network:
|
||||||
if parameters.initialize_network or parameters.join_network:
|
if parameters.initialize_network or parameters.join_network:
|
||||||
print("Can't supply --initialize-network or --join-network with --create-network")
|
print(
|
||||||
|
"Can't supply --initialize-network or --join-network "
|
||||||
|
"with --create-network"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
phase = SetupPhase.CREATE
|
phase = SetupPhase.CREATE
|
||||||
elif parameters.connect_network:
|
elif parameters.connect_network:
|
||||||
if parameters.initialize_network or parameters.join_network:
|
if parameters.initialize_network or parameters.join_network:
|
||||||
print("Can't supply --initialize-network or --join-network with --connect-network")
|
print(
|
||||||
|
"Can't supply --initialize-network or --join-network "
|
||||||
|
"with --connect-network"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
phase = SetupPhase.CONNECT
|
phase = SetupPhase.CONNECT
|
||||||
return phase
|
return phase
|
||||||
|
|
||||||
|
|
||||||
def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCommand, extra_args):
|
def setup(
|
||||||
|
command_context: DeployCommandContext,
|
||||||
|
parameters: LaconicStackSetupCommand,
|
||||||
|
extra_args,
|
||||||
|
):
|
||||||
options = opts.o
|
options = opts.o
|
||||||
|
|
||||||
currency = "alnt" # Does this need to be a parameter?
|
currency = "alnt" # Does this need to be a parameter?
|
||||||
|
|
@ -205,12 +248,9 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
|
||||||
|
|
||||||
network_dir = Path(parameters.network_dir).absolute()
|
network_dir = Path(parameters.network_dir).absolute()
|
||||||
laconicd_home_path_in_container = "/laconicd-home"
|
laconicd_home_path_in_container = "/laconicd-home"
|
||||||
mounts = [
|
mounts = [VolumeMapping(network_dir, laconicd_home_path_in_container)]
|
||||||
VolumeMapping(network_dir, laconicd_home_path_in_container)
|
|
||||||
]
|
|
||||||
|
|
||||||
if phase == SetupPhase.INITIALIZE:
|
if phase == SetupPhase.INITIALIZE:
|
||||||
|
|
||||||
# We want to create the directory so if it exists that's an error
|
# We want to create the directory so if it exists that's an error
|
||||||
if os.path.exists(network_dir):
|
if os.path.exists(network_dir):
|
||||||
print(f"Error: network directory {network_dir} already exists")
|
print(f"Error: network directory {network_dir} already exists")
|
||||||
|
|
@ -220,13 +260,18 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
|
||||||
|
|
||||||
output, status = run_container_command(
|
output, status = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd", f"laconicd init {parameters.node_moniker} --home {laconicd_home_path_in_container}\
|
"laconicd",
|
||||||
--chain-id {parameters.chain_id} --default-denom {currency}", mounts)
|
f"laconicd init {parameters.node_moniker} "
|
||||||
|
f"--home {laconicd_home_path_in_container} "
|
||||||
|
f"--chain-id {parameters.chain_id} --default-denom {currency}",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output}")
|
print(f"Command output: {output}")
|
||||||
|
|
||||||
elif phase == SetupPhase.JOIN:
|
elif phase == SetupPhase.JOIN:
|
||||||
# In the join phase (alternative to connect) we are participating in a genesis ceremony for the chain
|
# In the join phase (alternative to connect) we are participating in a
|
||||||
|
# genesis ceremony for the chain
|
||||||
if not os.path.exists(network_dir):
|
if not os.path.exists(network_dir):
|
||||||
print(f"Error: network directory {network_dir} doesn't exist")
|
print(f"Error: network directory {network_dir} doesn't exist")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
@ -234,52 +279,72 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
|
||||||
chain_id = _get_chain_id_from_config(network_dir)
|
chain_id = _get_chain_id_from_config(network_dir)
|
||||||
|
|
||||||
output1, status1 = run_container_command(
|
output1, status1 = run_container_command(
|
||||||
command_context, "laconicd", f"laconicd keys add {parameters.key_name} --home {laconicd_home_path_in_container}\
|
command_context,
|
||||||
--keyring-backend test", mounts)
|
"laconicd",
|
||||||
|
f"laconicd keys add {parameters.key_name} "
|
||||||
|
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output1}")
|
print(f"Command output: {output1}")
|
||||||
output2, status2 = run_container_command(
|
output2, status2 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd genesis add-genesis-account {parameters.key_name} 12900000000000000000000{currency}\
|
f"laconicd genesis add-genesis-account {parameters.key_name} "
|
||||||
--home {laconicd_home_path_in_container} --keyring-backend test",
|
f"12900000000000000000000{currency} "
|
||||||
mounts)
|
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output2}")
|
print(f"Command output: {output2}")
|
||||||
output3, status3 = run_container_command(
|
output3, status3 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd genesis gentx {parameters.key_name} 90000000000{currency} --home {laconicd_home_path_in_container}\
|
f"laconicd genesis gentx {parameters.key_name} "
|
||||||
--chain-id {chain_id} --keyring-backend test",
|
f"90000000000{currency} --home {laconicd_home_path_in_container} "
|
||||||
mounts)
|
f"--chain-id {chain_id} --keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output3}")
|
print(f"Command output: {output3}")
|
||||||
output4, status4 = run_container_command(
|
output4, status4 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd keys show {parameters.key_name} -a --home {laconicd_home_path_in_container} --keyring-backend test",
|
f"laconicd keys show {parameters.key_name} -a "
|
||||||
mounts)
|
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
print(f"Node account address: {output4}")
|
print(f"Node account address: {output4}")
|
||||||
|
|
||||||
elif phase == SetupPhase.CONNECT:
|
elif phase == SetupPhase.CONNECT:
|
||||||
# In the connect phase (named to not conflict with join) we are making a node that syncs a chain with existing genesis.json
|
# In the connect phase (named to not conflict with join) we are
|
||||||
# but not with validator role. We need this kind of node in order to bootstrap it into a validator after it syncs
|
# making a node that syncs a chain with existing genesis.json
|
||||||
|
# but not with validator role. We need this kind of node in order to
|
||||||
|
# bootstrap it into a validator after it syncs
|
||||||
output1, status1 = run_container_command(
|
output1, status1 = run_container_command(
|
||||||
command_context, "laconicd", f"laconicd keys add {parameters.key_name} --home {laconicd_home_path_in_container}\
|
command_context,
|
||||||
--keyring-backend test", mounts)
|
"laconicd",
|
||||||
|
f"laconicd keys add {parameters.key_name} "
|
||||||
|
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output1}")
|
print(f"Command output: {output1}")
|
||||||
output2, status2 = run_container_command(
|
output2, status2 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd keys show {parameters.key_name} -a --home {laconicd_home_path_in_container} --keyring-backend test",
|
f"laconicd keys show {parameters.key_name} -a "
|
||||||
mounts)
|
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
print(f"Node account address: {output2}")
|
print(f"Node account address: {output2}")
|
||||||
output3, status3 = run_container_command(
|
output3, status3 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd cometbft show-validator --home {laconicd_home_path_in_container}",
|
f"laconicd cometbft show-validator "
|
||||||
mounts)
|
f"--home {laconicd_home_path_in_container}",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
print(f"Node validator address: {output3}")
|
print(f"Node validator address: {output3}")
|
||||||
|
|
||||||
elif phase == SetupPhase.CREATE:
|
elif phase == SetupPhase.CREATE:
|
||||||
|
|
@ -287,42 +352,73 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
|
||||||
print(f"Error: network directory {network_dir} doesn't exist")
|
print(f"Error: network directory {network_dir} doesn't exist")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
# In the CREATE phase, we are either a "coordinator" node, generating the genesis.json file ourselves
|
# In the CREATE phase, we are either a "coordinator" node,
|
||||||
# OR we are a "not-coordinator" node, consuming a genesis file we got from the coordinator node.
|
# generating the genesis.json file ourselves
|
||||||
|
# OR we are a "not-coordinator" node, consuming a genesis file from
|
||||||
|
# the coordinator node.
|
||||||
if parameters.genesis_file:
|
if parameters.genesis_file:
|
||||||
# We got the genesis file from elsewhere
|
# We got the genesis file from elsewhere
|
||||||
# Copy it into our network dir
|
# Copy it into our network dir
|
||||||
genesis_file_path = Path(parameters.genesis_file)
|
genesis_file_path = Path(parameters.genesis_file)
|
||||||
if not os.path.exists(genesis_file_path):
|
if not os.path.exists(genesis_file_path):
|
||||||
print(f"Error: supplied genesis file: {parameters.genesis_file} does not exist.")
|
print(
|
||||||
|
f"Error: supplied genesis file: {parameters.genesis_file} "
|
||||||
|
"does not exist."
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
copyfile(genesis_file_path, os.path.join(network_dir, "config", os.path.basename(genesis_file_path)))
|
copyfile(
|
||||||
|
genesis_file_path,
|
||||||
|
os.path.join(
|
||||||
|
network_dir, "config", os.path.basename(genesis_file_path)
|
||||||
|
),
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
# We're generating the genesis file
|
# We're generating the genesis file
|
||||||
# First look in the supplied gentx files for the other nodes' keys
|
# First look in the supplied gentx files for the other nodes' keys
|
||||||
other_node_keys = _get_node_keys_from_gentx_files(parameters.gentx_address_list)
|
other_node_keys = _get_node_keys_from_gentx_files(
|
||||||
|
parameters.gentx_address_list
|
||||||
|
)
|
||||||
# Add those keys to our genesis, with balances we determine here (why?)
|
# Add those keys to our genesis, with balances we determine here (why?)
|
||||||
for other_node_key in other_node_keys:
|
for other_node_key in other_node_keys:
|
||||||
outputk, statusk = run_container_command(
|
outputk, statusk = run_container_command(
|
||||||
command_context, "laconicd", f"laconicd genesis add-genesis-account {other_node_key} \
|
command_context,
|
||||||
12900000000000000000000{currency}\
|
"laconicd",
|
||||||
--home {laconicd_home_path_in_container} --keyring-backend test", mounts)
|
f"laconicd genesis add-genesis-account {other_node_key} "
|
||||||
|
f"12900000000000000000000{currency} "
|
||||||
|
f"--home {laconicd_home_path_in_container} "
|
||||||
|
"--keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {outputk}")
|
print(f"Command output: {outputk}")
|
||||||
# Copy the gentx json files into our network dir
|
# Copy the gentx json files into our network dir
|
||||||
_copy_gentx_files(network_dir, parameters.gentx_file_list)
|
_copy_gentx_files(network_dir, parameters.gentx_file_list)
|
||||||
# Now we can run collect-gentxs
|
# Now we can run collect-gentxs
|
||||||
output1, status1 = run_container_command(
|
output1, status1 = run_container_command(
|
||||||
command_context, "laconicd", f"laconicd genesis collect-gentxs --home {laconicd_home_path_in_container}", mounts)
|
command_context,
|
||||||
|
"laconicd",
|
||||||
|
f"laconicd genesis collect-gentxs "
|
||||||
|
f"--home {laconicd_home_path_in_container}",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output1}")
|
print(f"Command output: {output1}")
|
||||||
print(f"Generated genesis file, please copy to other nodes as required: \
|
genesis_path = os.path.join(network_dir, "config", "genesis.json")
|
||||||
{os.path.join(network_dir, 'config', 'genesis.json')}")
|
print(
|
||||||
# Last thing, collect-gentxs puts a likely bogus set of persistent_peers in config.toml so we remove that now
|
f"Generated genesis file, please copy to other nodes "
|
||||||
|
f"as required: {genesis_path}"
|
||||||
|
)
|
||||||
|
# Last thing, collect-gentxs puts a likely bogus set of persistent_peers
|
||||||
|
# in config.toml so we remove that now
|
||||||
_remove_persistent_peers(network_dir)
|
_remove_persistent_peers(network_dir)
|
||||||
# In both cases we validate the genesis file now
|
# In both cases we validate the genesis file now
|
||||||
output2, status1 = run_container_command(
|
output2, status1 = run_container_command(
|
||||||
command_context, "laconicd", f"laconicd genesis validate-genesis --home {laconicd_home_path_in_container}", mounts)
|
command_context,
|
||||||
|
"laconicd",
|
||||||
|
f"laconicd genesis validate-genesis "
|
||||||
|
f"--home {laconicd_home_path_in_container}",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
print(f"validate-genesis result: {output2}")
|
print(f"validate-genesis result: {output2}")
|
||||||
|
|
||||||
else:
|
else:
|
||||||
|
|
@ -341,15 +437,23 @@ def create(deployment_context: DeploymentContext, extra_args):
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
config_dir_path = network_dir_path.joinpath("config")
|
config_dir_path = network_dir_path.joinpath("config")
|
||||||
if not (config_dir_path.exists() and config_dir_path.is_dir()):
|
if not (config_dir_path.exists() and config_dir_path.is_dir()):
|
||||||
print(f"Error: supplied network directory does not contain a config directory: {config_dir_path}")
|
print(
|
||||||
|
f"Error: supplied network directory does not contain "
|
||||||
|
f"a config directory: {config_dir_path}"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
data_dir_path = network_dir_path.joinpath("data")
|
data_dir_path = network_dir_path.joinpath("data")
|
||||||
if not (data_dir_path.exists() and data_dir_path.is_dir()):
|
if not (data_dir_path.exists() and data_dir_path.is_dir()):
|
||||||
print(f"Error: supplied network directory does not contain a data directory: {data_dir_path}")
|
print(
|
||||||
|
f"Error: supplied network directory does not contain "
|
||||||
|
f"a data directory: {data_dir_path}"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
# Copy the network directory contents into our deployment
|
# Copy the network directory contents into our deployment
|
||||||
# TODO: change this to work with non local paths
|
# TODO: change this to work with non local paths
|
||||||
deployment_config_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-config")
|
deployment_config_dir = deployment_context.deployment_dir.joinpath(
|
||||||
|
"data", "laconicd-config"
|
||||||
|
)
|
||||||
copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True)
|
copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True)
|
||||||
# If supplied, add the initial persistent peers to the config file
|
# If supplied, add the initial persistent peers to the config file
|
||||||
if extra_args[1]:
|
if extra_args[1]:
|
||||||
|
|
@ -360,7 +464,9 @@ def create(deployment_context: DeploymentContext, extra_args):
|
||||||
_set_listen_address(deployment_config_dir)
|
_set_listen_address(deployment_config_dir)
|
||||||
# Copy the data directory contents into our deployment
|
# Copy the data directory contents into our deployment
|
||||||
# TODO: change this to work with non local paths
|
# TODO: change this to work with non local paths
|
||||||
deployment_data_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-data")
|
deployment_data_dir = deployment_context.deployment_dir.joinpath(
|
||||||
|
"data", "laconicd-data"
|
||||||
|
)
|
||||||
copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True)
|
copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -24,16 +24,20 @@ default_spec_file_content = """config:
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
# Output a known string to a know file in the bind mounted directory ./container-output-dir
|
# Output a known string to a know file in the bind mounted directory
|
||||||
|
# ./container-output-dir
|
||||||
# for test purposes -- test checks that the file was written.
|
# for test purposes -- test checks that the file was written.
|
||||||
def setup(command_context: DeployCommandContext, parameters, extra_args):
|
def setup(command_context: DeployCommandContext, parameters, extra_args):
|
||||||
host_directory = "./container-output-dir"
|
host_directory = "./container-output-dir"
|
||||||
host_directory_absolute = Path(extra_args[0]).absolute().joinpath(host_directory)
|
host_directory_absolute = Path(extra_args[0]).absolute().joinpath(host_directory)
|
||||||
host_directory_absolute.mkdir(parents=True, exist_ok=True)
|
host_directory_absolute.mkdir(parents=True, exist_ok=True)
|
||||||
mounts = [
|
mounts = [VolumeMapping(host_directory_absolute, "/data")]
|
||||||
VolumeMapping(host_directory_absolute, "/data")
|
output, status = run_container_command(
|
||||||
]
|
command_context,
|
||||||
output, status = run_container_command(command_context, "test", "echo output-data > /data/output-file && echo success", mounts)
|
"test",
|
||||||
|
"echo output-data > /data/output-file && echo success",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def init(command_context: DeployCommandContext):
|
def init(command_context: DeployCommandContext):
|
||||||
|
|
@ -44,7 +48,7 @@ def init(command_context: DeployCommandContext):
|
||||||
def create(command_context: DeployCommandContext, extra_args):
|
def create(command_context: DeployCommandContext, extra_args):
|
||||||
data = "create-command-output-data"
|
data = "create-command-output-data"
|
||||||
output_file_path = command_context.deployment_dir.joinpath("create-file")
|
output_file_path = command_context.deployment_dir.joinpath("create-file")
|
||||||
with open(output_file_path, 'w+') as output_file:
|
with open(output_file_path, "w+") as output_file:
|
||||||
output_file.write(data)
|
output_file.write(data)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -15,7 +15,11 @@
|
||||||
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from python_on_whales import DockerClient, DockerException
|
from python_on_whales import DockerClient, DockerException
|
||||||
from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator
|
from stack_orchestrator.deploy.deployer import (
|
||||||
|
Deployer,
|
||||||
|
DeployerException,
|
||||||
|
DeployerConfigGenerator,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
|
|
||||||
|
|
@ -24,9 +28,19 @@ class DockerDeployer(Deployer):
|
||||||
name: str = "compose"
|
name: str = "compose"
|
||||||
type: str
|
type: str
|
||||||
|
|
||||||
def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None:
|
def __init__(
|
||||||
self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name,
|
self,
|
||||||
compose_env_file=compose_env_file)
|
type,
|
||||||
|
deployment_context: DeploymentContext,
|
||||||
|
compose_files,
|
||||||
|
compose_project_name,
|
||||||
|
compose_env_file,
|
||||||
|
) -> None:
|
||||||
|
self.docker = DockerClient(
|
||||||
|
compose_files=compose_files,
|
||||||
|
compose_project_name=compose_project_name,
|
||||||
|
compose_env_file=compose_env_file,
|
||||||
|
)
|
||||||
self.type = type
|
self.type = type
|
||||||
|
|
||||||
def up(self, detach, skip_cluster_management, services):
|
def up(self, detach, skip_cluster_management, services):
|
||||||
|
|
@ -68,29 +82,54 @@ class DockerDeployer(Deployer):
|
||||||
def port(self, service, private_port):
|
def port(self, service, private_port):
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
return self.docker.compose.port(service=service, private_port=private_port)
|
return self.docker.compose.port(
|
||||||
|
service=service, private_port=private_port
|
||||||
|
)
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
raise DeployerException(e)
|
raise DeployerException(e)
|
||||||
|
|
||||||
def execute(self, service, command, tty, envs):
|
def execute(self, service, command, tty, envs):
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs)
|
return self.docker.compose.execute(
|
||||||
|
service=service, command=command, tty=tty, envs=envs
|
||||||
|
)
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
raise DeployerException(e)
|
raise DeployerException(e)
|
||||||
|
|
||||||
def logs(self, services, tail, follow, stream):
|
def logs(self, services, tail, follow, stream):
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
|
return self.docker.compose.logs(
|
||||||
|
services=services, tail=tail, follow=follow, stream=stream
|
||||||
|
)
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
raise DeployerException(e)
|
raise DeployerException(e)
|
||||||
|
|
||||||
def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
|
def run(
|
||||||
|
self,
|
||||||
|
image: str,
|
||||||
|
command=None,
|
||||||
|
user=None,
|
||||||
|
volumes=None,
|
||||||
|
entrypoint=None,
|
||||||
|
env={},
|
||||||
|
ports=[],
|
||||||
|
detach=False,
|
||||||
|
):
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
return self.docker.run(image=image, command=command, user=user, volumes=volumes,
|
return self.docker.run(
|
||||||
entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0)
|
image=image,
|
||||||
|
command=command,
|
||||||
|
user=user,
|
||||||
|
volumes=volumes,
|
||||||
|
entrypoint=entrypoint,
|
||||||
|
envs=env,
|
||||||
|
detach=detach,
|
||||||
|
publish=ports,
|
||||||
|
publish_all=len(ports) == 0,
|
||||||
|
)
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
raise DeployerException(e)
|
raise DeployerException(e)
|
||||||
|
|
||||||
|
|
@ -106,20 +145,25 @@ class DockerDeployer(Deployer):
|
||||||
# Deployment directory is parent of compose directory
|
# Deployment directory is parent of compose directory
|
||||||
compose_dir = Path(self.docker.compose_files[0]).parent
|
compose_dir = Path(self.docker.compose_files[0]).parent
|
||||||
deployment_dir = compose_dir.parent
|
deployment_dir = compose_dir.parent
|
||||||
job_compose_file = deployment_dir / "compose-jobs" / f"docker-compose-{job_name}.yml"
|
job_compose_file = (
|
||||||
|
deployment_dir / "compose-jobs" / f"docker-compose-{job_name}.yml"
|
||||||
|
)
|
||||||
|
|
||||||
if not job_compose_file.exists():
|
if not job_compose_file.exists():
|
||||||
raise DeployerException(f"Job compose file not found: {job_compose_file}")
|
raise DeployerException(
|
||||||
|
f"Job compose file not found: {job_compose_file}"
|
||||||
|
)
|
||||||
|
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Running job from: {job_compose_file}")
|
print(f"Running job from: {job_compose_file}")
|
||||||
|
|
||||||
# Create a DockerClient for the job compose file with same project name and env file
|
# Create a DockerClient for the job compose file with same
|
||||||
|
# project name and env file
|
||||||
# This allows the job to access volumes from the main deployment
|
# This allows the job to access volumes from the main deployment
|
||||||
job_docker = DockerClient(
|
job_docker = DockerClient(
|
||||||
compose_files=[job_compose_file],
|
compose_files=[job_compose_file],
|
||||||
compose_project_name=self.docker.compose_project_name,
|
compose_project_name=self.docker.compose_project_name,
|
||||||
compose_env_file=self.docker.compose_env_file
|
compose_env_file=self.docker.compose_env_file,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Run the job with --rm flag to remove container after completion
|
# Run the job with --rm flag to remove container after completion
|
||||||
|
|
@ -130,7 +174,6 @@ class DockerDeployer(Deployer):
|
||||||
|
|
||||||
|
|
||||||
class DockerDeployerConfigGenerator(DeployerConfigGenerator):
|
class DockerDeployerConfigGenerator(DeployerConfigGenerator):
|
||||||
|
|
||||||
def __init__(self, type: str) -> None:
|
def __init__(self, type: str) -> None:
|
||||||
super().__init__()
|
super().__init__()
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -47,20 +47,23 @@ from stack_orchestrator.deploy.k8s import k8s_command
|
||||||
|
|
||||||
@click.group()
|
@click.group()
|
||||||
@click.option("--include", help="only start these components")
|
@click.option("--include", help="only start these components")
|
||||||
@click.option("--exclude", help="don\'t start these components")
|
@click.option("--exclude", help="don't start these components")
|
||||||
@click.option("--env-file", help="env file to be used")
|
@click.option("--env-file", help="env file to be used")
|
||||||
@click.option("--cluster", help="specify a non-default cluster name")
|
@click.option("--cluster", help="specify a non-default cluster name")
|
||||||
@click.option("--deploy-to", help="cluster system to deploy to (compose or k8s or k8s-kind)")
|
@click.option(
|
||||||
|
"--deploy-to", help="cluster system to deploy to (compose or k8s or k8s-kind)"
|
||||||
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, env_file, cluster, deploy_to):
|
def command(ctx, include, exclude, env_file, cluster, deploy_to):
|
||||||
'''deploy a stack'''
|
"""deploy a stack"""
|
||||||
|
|
||||||
# k8s subcommand doesn't require a stack
|
# k8s subcommand doesn't require a stack
|
||||||
if ctx.invoked_subcommand == "k8s":
|
if ctx.invoked_subcommand == "k8s":
|
||||||
return
|
return
|
||||||
|
|
||||||
# Although in theory for some subcommands (e.g. deploy create) the stack can be inferred,
|
# Although in theory for some subcommands (e.g. deploy create) the stack
|
||||||
# Click doesn't allow us to know that here, so we make providing the stack mandatory
|
# can be inferred, Click doesn't allow us to know that here, so we make
|
||||||
|
# providing the stack mandatory
|
||||||
stack = global_options2(ctx).stack
|
stack = global_options2(ctx).stack
|
||||||
if not stack:
|
if not stack:
|
||||||
print("Error: --stack option is required")
|
print("Error: --stack option is required")
|
||||||
|
|
@ -73,7 +76,16 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to):
|
||||||
deploy_to = "compose"
|
deploy_to = "compose"
|
||||||
|
|
||||||
stack = get_stack_path(stack)
|
stack = get_stack_path(stack)
|
||||||
ctx.obj = create_deploy_context(global_options2(ctx), None, stack, include, exclude, cluster, env_file, deploy_to)
|
ctx.obj = create_deploy_context(
|
||||||
|
global_options2(ctx),
|
||||||
|
None,
|
||||||
|
stack,
|
||||||
|
include,
|
||||||
|
exclude,
|
||||||
|
cluster,
|
||||||
|
env_file,
|
||||||
|
deploy_to,
|
||||||
|
)
|
||||||
# Subcommand is executed now, by the magic of click
|
# Subcommand is executed now, by the magic of click
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -85,7 +97,8 @@ def create_deploy_context(
|
||||||
exclude,
|
exclude,
|
||||||
cluster,
|
cluster,
|
||||||
env_file,
|
env_file,
|
||||||
deploy_to) -> DeployCommandContext:
|
deploy_to,
|
||||||
|
) -> DeployCommandContext:
|
||||||
# Extract the cluster name from the deployment, if we have one
|
# Extract the cluster name from the deployment, if we have one
|
||||||
if deployment_context and cluster is None:
|
if deployment_context and cluster is None:
|
||||||
cluster = deployment_context.get_cluster_id()
|
cluster = deployment_context.get_cluster_id()
|
||||||
|
|
@ -101,17 +114,27 @@ def create_deploy_context(
|
||||||
|
|
||||||
# For helm chart deployments, skip compose file loading
|
# For helm chart deployments, skip compose file loading
|
||||||
if is_helm_chart_deployment:
|
if is_helm_chart_deployment:
|
||||||
cluster_context = ClusterContext(global_context, cluster, [], [], [], None, env_file)
|
cluster_context = ClusterContext(
|
||||||
|
global_context, cluster, [], [], [], None, env_file
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
|
cluster_context = _make_cluster_context(
|
||||||
|
global_context, stack, include, exclude, cluster, env_file
|
||||||
|
)
|
||||||
|
|
||||||
deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files,
|
deployer = getDeployer(
|
||||||
|
deploy_to,
|
||||||
|
deployment_context,
|
||||||
|
compose_files=cluster_context.compose_files,
|
||||||
compose_project_name=cluster_context.cluster,
|
compose_project_name=cluster_context.cluster,
|
||||||
compose_env_file=cluster_context.env_file)
|
compose_env_file=cluster_context.env_file,
|
||||||
|
)
|
||||||
return DeployCommandContext(stack, cluster_context, deployer)
|
return DeployCommandContext(stack, cluster_context, deployer)
|
||||||
|
|
||||||
|
|
||||||
def up_operation(ctx, services_list, stay_attached=False, skip_cluster_management=False):
|
def up_operation(
|
||||||
|
ctx, services_list, stay_attached=False, skip_cluster_management=False
|
||||||
|
):
|
||||||
global_context = ctx.parent.parent.obj
|
global_context = ctx.parent.parent.obj
|
||||||
deploy_context = ctx.obj
|
deploy_context = ctx.obj
|
||||||
cluster_context = deploy_context.cluster_context
|
cluster_context = deploy_context.cluster_context
|
||||||
|
|
@ -119,21 +142,38 @@ def up_operation(ctx, services_list, stay_attached=False, skip_cluster_managemen
|
||||||
for attr, value in container_exec_env.items():
|
for attr, value in container_exec_env.items():
|
||||||
os.environ[attr] = value
|
os.environ[attr] = value
|
||||||
if global_context.verbose:
|
if global_context.verbose:
|
||||||
print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
|
print(
|
||||||
|
f"Running compose up with container_exec_env: {container_exec_env}, "
|
||||||
|
f"extra_args: {services_list}"
|
||||||
|
)
|
||||||
for pre_start_command in cluster_context.pre_start_commands:
|
for pre_start_command in cluster_context.pre_start_commands:
|
||||||
_run_command(global_context, cluster_context.cluster, pre_start_command)
|
_run_command(global_context, cluster_context.cluster, pre_start_command)
|
||||||
deploy_context.deployer.up(detach=not stay_attached, skip_cluster_management=skip_cluster_management, services=services_list)
|
deploy_context.deployer.up(
|
||||||
|
detach=not stay_attached,
|
||||||
|
skip_cluster_management=skip_cluster_management,
|
||||||
|
services=services_list,
|
||||||
|
)
|
||||||
for post_start_command in cluster_context.post_start_commands:
|
for post_start_command in cluster_context.post_start_commands:
|
||||||
_run_command(global_context, cluster_context.cluster, post_start_command)
|
_run_command(global_context, cluster_context.cluster, post_start_command)
|
||||||
_orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)
|
_orchestrate_cluster_config(
|
||||||
|
global_context,
|
||||||
|
cluster_context.config,
|
||||||
|
deploy_context.deployer,
|
||||||
|
container_exec_env,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def down_operation(ctx, delete_volumes, extra_args_list, skip_cluster_management=False):
|
def down_operation(ctx, delete_volumes, extra_args_list, skip_cluster_management=False):
|
||||||
timeout_arg = None
|
timeout_arg = None
|
||||||
if extra_args_list:
|
if extra_args_list:
|
||||||
timeout_arg = extra_args_list[0]
|
timeout_arg = extra_args_list[0]
|
||||||
# Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
|
# Specify shutdown timeout (default 10s) to give services enough time to
|
||||||
ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes, skip_cluster_management=skip_cluster_management)
|
# shutdown gracefully
|
||||||
|
ctx.obj.deployer.down(
|
||||||
|
timeout=timeout_arg,
|
||||||
|
volumes=delete_volumes,
|
||||||
|
skip_cluster_management=skip_cluster_management,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def status_operation(ctx):
|
def status_operation(ctx):
|
||||||
|
|
@ -160,7 +200,11 @@ def ps_operation(ctx):
|
||||||
if mapping is None:
|
if mapping is None:
|
||||||
print(f"{port_mapping}", end="")
|
print(f"{port_mapping}", end="")
|
||||||
else:
|
else:
|
||||||
print(f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}->{port_mapping}", end="")
|
print(
|
||||||
|
f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}"
|
||||||
|
f"->{port_mapping}",
|
||||||
|
end="",
|
||||||
|
)
|
||||||
comma = ", "
|
comma = ", "
|
||||||
print()
|
print()
|
||||||
else:
|
else:
|
||||||
|
|
@ -195,7 +239,9 @@ def exec_operation(ctx, extra_args):
|
||||||
if global_context.verbose:
|
if global_context.verbose:
|
||||||
print(f"Running compose exec {service_name} {command_to_exec}")
|
print(f"Running compose exec {service_name} {command_to_exec}")
|
||||||
try:
|
try:
|
||||||
ctx.obj.deployer.execute(service_name, command_to_exec, envs=container_exec_env, tty=True)
|
ctx.obj.deployer.execute(
|
||||||
|
service_name, command_to_exec, envs=container_exec_env, tty=True
|
||||||
|
)
|
||||||
except DeployerException:
|
except DeployerException:
|
||||||
print("container command returned error exit status")
|
print("container command returned error exit status")
|
||||||
|
|
||||||
|
|
@ -203,7 +249,9 @@ def exec_operation(ctx, extra_args):
|
||||||
def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
|
def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
|
||||||
extra_args_list = list(extra_args) or None
|
extra_args_list = list(extra_args) or None
|
||||||
services_list = extra_args_list if extra_args_list is not None else []
|
services_list = extra_args_list if extra_args_list is not None else []
|
||||||
logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True)
|
logs_stream = ctx.obj.deployer.logs(
|
||||||
|
services=services_list, tail=tail, follow=follow, stream=True
|
||||||
|
)
|
||||||
for stream_type, stream_content in logs_stream:
|
for stream_type, stream_content in logs_stream:
|
||||||
print(stream_content.decode("utf-8"), end="")
|
print(stream_content.decode("utf-8"), end="")
|
||||||
|
|
||||||
|
|
@ -220,7 +268,7 @@ def run_job_operation(ctx, job_name: str, helm_release: str = None):
|
||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
|
@click.argument("extra_args", nargs=-1) # help: command: up <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def up(ctx, extra_args):
|
def up(ctx, extra_args):
|
||||||
extra_args_list = list(extra_args) or None
|
extra_args_list = list(extra_args) or None
|
||||||
|
|
@ -228,8 +276,10 @@ def up(ctx, extra_args):
|
||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
|
@click.option(
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: down<service1> <service2>
|
"--delete-volumes/--preserve-volumes", default=False, help="delete data volumes"
|
||||||
|
)
|
||||||
|
@click.argument("extra_args", nargs=-1) # help: command: down<service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def down(ctx, delete_volumes, extra_args):
|
def down(ctx, delete_volumes, extra_args):
|
||||||
extra_args_list = list(extra_args) or None
|
extra_args_list = list(extra_args) or None
|
||||||
|
|
@ -243,14 +293,14 @@ def ps(ctx):
|
||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: port <service1> <service2>
|
@click.argument("extra_args", nargs=-1) # help: command: port <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def port(ctx, extra_args):
|
def port(ctx, extra_args):
|
||||||
port_operation(ctx, extra_args)
|
port_operation(ctx, extra_args)
|
||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: exec <service> <command>
|
@click.argument("extra_args", nargs=-1) # help: command: exec <service> <command>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def exec(ctx, extra_args):
|
def exec(ctx, extra_args):
|
||||||
exec_operation(ctx, extra_args)
|
exec_operation(ctx, extra_args)
|
||||||
|
|
@ -259,19 +309,21 @@ def exec(ctx, extra_args):
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--tail", "-n", default=None, help="number of lines to display")
|
@click.option("--tail", "-n", default=None, help="number of lines to display")
|
||||||
@click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
|
@click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: logs <service1> <service2>
|
@click.argument("extra_args", nargs=-1) # help: command: logs <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def logs(ctx, tail, follow, extra_args):
|
def logs(ctx, tail, follow, extra_args):
|
||||||
logs_operation(ctx, tail, follow, extra_args)
|
logs_operation(ctx, tail, follow, extra_args)
|
||||||
|
|
||||||
|
|
||||||
def get_stack_status(ctx, stack):
|
def get_stack_status(ctx, stack):
|
||||||
|
|
||||||
ctx_copy = copy.copy(ctx)
|
ctx_copy = copy.copy(ctx)
|
||||||
ctx_copy.stack = stack
|
ctx_copy.stack = stack
|
||||||
|
|
||||||
cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None)
|
cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None)
|
||||||
deployer = Deployer(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
|
deployer = Deployer(
|
||||||
|
compose_files=cluster_context.compose_files,
|
||||||
|
compose_project_name=cluster_context.cluster,
|
||||||
|
)
|
||||||
# TODO: refactor to avoid duplicating this code above
|
# TODO: refactor to avoid duplicating this code above
|
||||||
if ctx.verbose:
|
if ctx.verbose:
|
||||||
print("Running compose ps")
|
print("Running compose ps")
|
||||||
|
|
@ -289,14 +341,15 @@ def get_stack_status(ctx, stack):
|
||||||
def _make_runtime_env(ctx):
|
def _make_runtime_env(ctx):
|
||||||
container_exec_env = {
|
container_exec_env = {
|
||||||
"CERC_HOST_UID": f"{os.getuid()}",
|
"CERC_HOST_UID": f"{os.getuid()}",
|
||||||
"CERC_HOST_GID": f"{os.getgid()}"
|
"CERC_HOST_GID": f"{os.getgid()}",
|
||||||
}
|
}
|
||||||
container_exec_env.update({"CERC_SCRIPT_DEBUG": "true"} if ctx.debug else {})
|
container_exec_env.update({"CERC_SCRIPT_DEBUG": "true"} if ctx.debug else {})
|
||||||
return container_exec_env
|
return container_exec_env
|
||||||
|
|
||||||
|
|
||||||
def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude):
|
def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude):
|
||||||
# Create default unique, stable cluster name from confile file path and stack name if provided
|
# Create default unique, stable cluster name from confile file path and
|
||||||
|
# stack name if provided
|
||||||
if deployment:
|
if deployment:
|
||||||
path = os.path.realpath(os.path.abspath(compose_dir))
|
path = os.path.realpath(os.path.abspath(compose_dir))
|
||||||
else:
|
else:
|
||||||
|
|
@ -311,7 +364,8 @@ def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
|
||||||
return cluster
|
return cluster
|
||||||
|
|
||||||
|
|
||||||
# stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack
|
# stack has to be either PathLike pointing to a stack yml file, or a
|
||||||
|
# string with the name of a known stack
|
||||||
def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
|
def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
|
||||||
dev_root_path = get_dev_root_path(ctx)
|
dev_root_path = get_dev_root_path(ctx)
|
||||||
|
|
||||||
|
|
@ -320,16 +374,22 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
|
||||||
if deployment:
|
if deployment:
|
||||||
compose_dir = stack.joinpath("compose")
|
compose_dir = stack.joinpath("compose")
|
||||||
else:
|
else:
|
||||||
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
# See:
|
||||||
compose_dir = Path(__file__).absolute().parent.parent.joinpath("data", "compose")
|
# https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
||||||
|
compose_dir = (
|
||||||
|
Path(__file__).absolute().parent.parent.joinpath("data", "compose")
|
||||||
|
)
|
||||||
|
|
||||||
if cluster is None:
|
if cluster is None:
|
||||||
cluster = _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
|
cluster = _make_default_cluster_name(
|
||||||
|
deployment, compose_dir, stack, include, exclude
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
_make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
|
_make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
|
||||||
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
|
|
||||||
with resources.open_text(data, "pod-list.txt") as pod_list_file:
|
with resources.open_text(data, "pod-list.txt") as pod_list_file:
|
||||||
all_pods = pod_list_file.read().splitlines()
|
all_pods = pod_list_file.read().splitlines()
|
||||||
|
|
||||||
|
|
@ -337,8 +397,8 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
# TODO: syntax check the input here
|
# TODO: syntax check the input here
|
||||||
pods_in_scope = stack_config['pods']
|
pods_in_scope = stack_config["pods"]
|
||||||
cluster_config = stack_config['config'] if 'config' in stack_config else None
|
cluster_config = stack_config["config"] if "config" in stack_config else None
|
||||||
else:
|
else:
|
||||||
pods_in_scope = all_pods
|
pods_in_scope = all_pods
|
||||||
cluster_config = None
|
cluster_config = None
|
||||||
|
|
@ -361,29 +421,47 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
|
||||||
if include_exclude_check(pod_name, include, exclude):
|
if include_exclude_check(pod_name, include, exclude):
|
||||||
if pod_repository is None or pod_repository == "internal":
|
if pod_repository is None or pod_repository == "internal":
|
||||||
if deployment:
|
if deployment:
|
||||||
compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
|
compose_file_name = os.path.join(
|
||||||
|
compose_dir, f"docker-compose-{pod_path}.yml"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
compose_file_name = resolve_compose_file(stack, pod_name)
|
compose_file_name = resolve_compose_file(stack, pod_name)
|
||||||
else:
|
else:
|
||||||
if deployment:
|
if deployment:
|
||||||
compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml")
|
compose_file_name = os.path.join(
|
||||||
|
compose_dir, f"docker-compose-{pod_name}.yml"
|
||||||
|
)
|
||||||
pod_pre_start_command = pod.get("pre_start_command")
|
pod_pre_start_command = pod.get("pre_start_command")
|
||||||
pod_post_start_command = pod.get("post_start_command")
|
pod_post_start_command = pod.get("post_start_command")
|
||||||
script_dir = compose_dir.parent.joinpath("pods", pod_name, "scripts")
|
script_dir = compose_dir.parent.joinpath(
|
||||||
|
"pods", pod_name, "scripts"
|
||||||
|
)
|
||||||
if pod_pre_start_command is not None:
|
if pod_pre_start_command is not None:
|
||||||
pre_start_commands.append(os.path.join(script_dir, pod_pre_start_command))
|
pre_start_commands.append(
|
||||||
|
os.path.join(script_dir, pod_pre_start_command)
|
||||||
|
)
|
||||||
if pod_post_start_command is not None:
|
if pod_post_start_command is not None:
|
||||||
post_start_commands.append(os.path.join(script_dir, pod_post_start_command))
|
post_start_commands.append(
|
||||||
|
os.path.join(script_dir, pod_post_start_command)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
# TODO: fix this code for external stack with scripts
|
# TODO: fix this code for external stack with scripts
|
||||||
pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
|
pod_root_dir = os.path.join(
|
||||||
compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml")
|
dev_root_path, pod_repository.split("/")[-1], pod["path"]
|
||||||
|
)
|
||||||
|
compose_file_name = os.path.join(
|
||||||
|
pod_root_dir, f"docker-compose-{pod_name}.yml"
|
||||||
|
)
|
||||||
pod_pre_start_command = pod.get("pre_start_command")
|
pod_pre_start_command = pod.get("pre_start_command")
|
||||||
pod_post_start_command = pod.get("post_start_command")
|
pod_post_start_command = pod.get("post_start_command")
|
||||||
if pod_pre_start_command is not None:
|
if pod_pre_start_command is not None:
|
||||||
pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command))
|
pre_start_commands.append(
|
||||||
|
os.path.join(pod_root_dir, pod_pre_start_command)
|
||||||
|
)
|
||||||
if pod_post_start_command is not None:
|
if pod_post_start_command is not None:
|
||||||
post_start_commands.append(os.path.join(pod_root_dir, pod_post_start_command))
|
post_start_commands.append(
|
||||||
|
os.path.join(pod_root_dir, pod_post_start_command)
|
||||||
|
)
|
||||||
compose_files.append(compose_file_name)
|
compose_files.append(compose_file_name)
|
||||||
else:
|
else:
|
||||||
if ctx.verbose:
|
if ctx.verbose:
|
||||||
|
|
@ -392,7 +470,15 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
|
||||||
if ctx.verbose:
|
if ctx.verbose:
|
||||||
print(f"files: {compose_files}")
|
print(f"files: {compose_files}")
|
||||||
|
|
||||||
return ClusterContext(ctx, cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file)
|
return ClusterContext(
|
||||||
|
ctx,
|
||||||
|
cluster,
|
||||||
|
compose_files,
|
||||||
|
pre_start_commands,
|
||||||
|
post_start_commands,
|
||||||
|
cluster_config,
|
||||||
|
env_file,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def _convert_to_new_format(old_pod_array):
|
def _convert_to_new_format(old_pod_array):
|
||||||
|
|
@ -401,11 +487,7 @@ def _convert_to_new_format(old_pod_array):
|
||||||
if isinstance(old_pod, dict):
|
if isinstance(old_pod, dict):
|
||||||
new_pod_array.append(old_pod)
|
new_pod_array.append(old_pod)
|
||||||
else:
|
else:
|
||||||
new_pod = {
|
new_pod = {"name": old_pod, "repository": "internal", "path": old_pod}
|
||||||
"name": old_pod,
|
|
||||||
"repository": "internal",
|
|
||||||
"path": old_pod
|
|
||||||
}
|
|
||||||
new_pod_array.append(new_pod)
|
new_pod_array.append(new_pod)
|
||||||
return new_pod_array
|
return new_pod_array
|
||||||
|
|
||||||
|
|
@ -419,14 +501,15 @@ def _run_command(ctx, cluster_name, command):
|
||||||
command_env["CERC_SO_COMPOSE_PROJECT"] = cluster_name
|
command_env["CERC_SO_COMPOSE_PROJECT"] = cluster_name
|
||||||
if ctx.debug:
|
if ctx.debug:
|
||||||
command_env["CERC_SCRIPT_DEBUG"] = "true"
|
command_env["CERC_SCRIPT_DEBUG"] = "true"
|
||||||
command_result = subprocess.run(command_file, shell=True, env=command_env, cwd=command_dir)
|
command_result = subprocess.run(
|
||||||
|
command_file, shell=True, env=command_env, cwd=command_dir
|
||||||
|
)
|
||||||
if command_result.returncode != 0:
|
if command_result.returncode != 0:
|
||||||
print(f"FATAL Error running command: {command}")
|
print(f"FATAL Error running command: {command}")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env):
|
def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env):
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class ConfigDirective:
|
class ConfigDirective:
|
||||||
source_container: str
|
source_container: str
|
||||||
|
|
@ -444,24 +527,32 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en
|
||||||
container_config[directive].split(".")[0],
|
container_config[directive].split(".")[0],
|
||||||
container_config[directive].split(".")[1],
|
container_config[directive].split(".")[1],
|
||||||
container,
|
container,
|
||||||
directive
|
directive,
|
||||||
)
|
)
|
||||||
if ctx.verbose:
|
if ctx.verbose:
|
||||||
print(f"Setting {pd.destination_container}.{pd.destination_variable}"
|
print(
|
||||||
f" = {pd.source_container}.{pd.source_variable}")
|
f"Setting {pd.destination_container}.{pd.destination_variable}"
|
||||||
|
f" = {pd.source_container}.{pd.source_variable}"
|
||||||
|
)
|
||||||
# TODO: add a timeout
|
# TODO: add a timeout
|
||||||
waiting_for_data = True
|
waiting_for_data = True
|
||||||
destination_output = "*** no output received yet ***"
|
destination_output = "*** no output received yet ***"
|
||||||
while waiting_for_data:
|
while waiting_for_data:
|
||||||
# TODO: fix the script paths so they're consistent between containers
|
# TODO: fix the script paths so they're consistent between
|
||||||
|
# containers
|
||||||
source_value = None
|
source_value = None
|
||||||
try:
|
try:
|
||||||
source_value = deployer.execute(pd.source_container,
|
source_value = deployer.execute(
|
||||||
["sh", "-c",
|
pd.source_container,
|
||||||
|
[
|
||||||
|
"sh",
|
||||||
|
"-c",
|
||||||
"sh /docker-entrypoint-scripts.d/export-"
|
"sh /docker-entrypoint-scripts.d/export-"
|
||||||
f"{pd.source_variable}.sh"],
|
f"{pd.source_variable}.sh",
|
||||||
|
],
|
||||||
tty=False,
|
tty=False,
|
||||||
envs=container_exec_env)
|
envs=container_exec_env,
|
||||||
|
)
|
||||||
except DeployerException as error:
|
except DeployerException as error:
|
||||||
if ctx.debug:
|
if ctx.debug:
|
||||||
print(f"Docker exception reading config source: {error}")
|
print(f"Docker exception reading config source: {error}")
|
||||||
|
|
@ -469,20 +560,28 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en
|
||||||
# "It returned with code 1"
|
# "It returned with code 1"
|
||||||
if "It returned with code 1" in str(error):
|
if "It returned with code 1" in str(error):
|
||||||
if ctx.verbose:
|
if ctx.verbose:
|
||||||
print("Config export script returned an error, re-trying")
|
print(
|
||||||
# If the script failed to execute (e.g. the file is not there) then we get:
|
"Config export script returned an error, re-trying"
|
||||||
|
)
|
||||||
|
# If the script failed to execute
|
||||||
|
# (e.g. the file is not there) then we get:
|
||||||
# "It returned with code 2"
|
# "It returned with code 2"
|
||||||
if "It returned with code 2" in str(error):
|
if "It returned with code 2" in str(error):
|
||||||
print(f"Fatal error reading config source: {error}")
|
print(f"Fatal error reading config source: {error}")
|
||||||
if source_value:
|
if source_value:
|
||||||
if ctx.debug:
|
if ctx.debug:
|
||||||
print(f"fetched source value: {source_value}")
|
print(f"fetched source value: {source_value}")
|
||||||
destination_output = deployer.execute(pd.destination_container,
|
destination_output = deployer.execute(
|
||||||
["sh", "-c",
|
pd.destination_container,
|
||||||
|
[
|
||||||
|
"sh",
|
||||||
|
"-c",
|
||||||
f"sh /scripts/import-{pd.destination_variable}.sh"
|
f"sh /scripts/import-{pd.destination_variable}.sh"
|
||||||
f" {source_value}"],
|
f" {source_value}",
|
||||||
|
],
|
||||||
tty=False,
|
tty=False,
|
||||||
envs=container_exec_env)
|
envs=container_exec_env,
|
||||||
|
)
|
||||||
waiting_for_data = False
|
waiting_for_data = False
|
||||||
if ctx.debug and not waiting_for_data:
|
if ctx.debug and not waiting_for_data:
|
||||||
print(f"destination output: {destination_output}")
|
print(f"destination output: {destination_output}")
|
||||||
|
|
|
||||||
|
|
@ -21,7 +21,8 @@ from stack_orchestrator.deploy.deployer import Deployer
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class ClusterContext:
|
class ClusterContext:
|
||||||
options: CommandOptions # TODO: this should be in its own object not stuffed in here
|
# TODO: this should be in its own object not stuffed in here
|
||||||
|
options: CommandOptions
|
||||||
cluster: str
|
cluster: str
|
||||||
compose_files: List[str]
|
compose_files: List[str]
|
||||||
pre_start_commands: List[str]
|
pre_start_commands: List[str]
|
||||||
|
|
|
||||||
|
|
@ -15,7 +15,12 @@
|
||||||
|
|
||||||
from typing import List, Any
|
from typing import List, Any
|
||||||
from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
|
from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
|
||||||
from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_pod_list, resolve_compose_file
|
from stack_orchestrator.util import (
|
||||||
|
get_parsed_stack_config,
|
||||||
|
get_yaml,
|
||||||
|
get_pod_list,
|
||||||
|
resolve_compose_file,
|
||||||
|
)
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -38,7 +43,7 @@ def _container_image_from_service(stack: str, service: str):
|
||||||
|
|
||||||
|
|
||||||
def parsed_pod_files_map_from_file_names(pod_files):
|
def parsed_pod_files_map_from_file_names(pod_files):
|
||||||
parsed_pod_yaml_map : Any = {}
|
parsed_pod_yaml_map: Any = {}
|
||||||
for pod_file in pod_files:
|
for pod_file in pod_files:
|
||||||
with open(pod_file, "r") as pod_file_descriptor:
|
with open(pod_file, "r") as pod_file_descriptor:
|
||||||
parsed_pod_file = get_yaml().load(pod_file_descriptor)
|
parsed_pod_file = get_yaml().load(pod_file_descriptor)
|
||||||
|
|
@ -73,7 +78,9 @@ def _volumes_to_docker(mounts: List[VolumeMapping]):
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def run_container_command(ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping]):
|
def run_container_command(
|
||||||
|
ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping]
|
||||||
|
):
|
||||||
deployer = ctx.deployer
|
deployer = ctx.deployer
|
||||||
container_image = _container_image_from_service(ctx.stack, service)
|
container_image = _container_image_from_service(ctx.stack, service)
|
||||||
docker_volumes = _volumes_to_docker(mounts)
|
docker_volumes = _volumes_to_docker(mounts)
|
||||||
|
|
@ -81,11 +88,14 @@ def run_container_command(ctx: DeployCommandContext, service: str, command: str,
|
||||||
print(f"Running this command in {service} container: {command}")
|
print(f"Running this command in {service} container: {command}")
|
||||||
docker_output = deployer.run(
|
docker_output = deployer.run(
|
||||||
container_image,
|
container_image,
|
||||||
["-c", command], entrypoint="sh",
|
["-c", command],
|
||||||
# Current laconicd container has a bug where it crashes when run not as root
|
entrypoint="sh",
|
||||||
# Commented out line below is a workaround. Created files end up owned by root on the host
|
# Current laconicd container has a bug where it crashes when run not
|
||||||
|
# as root
|
||||||
|
# Commented out line below is a workaround. Created files end up
|
||||||
|
# owned by root on the host
|
||||||
# user=f"{os.getuid()}:{os.getgid()}",
|
# user=f"{os.getuid()}:{os.getgid()}",
|
||||||
volumes=docker_volumes
|
volumes=docker_volumes,
|
||||||
)
|
)
|
||||||
# There doesn't seem to be a way to get an exit code from docker.run()
|
# There doesn't seem to be a way to get an exit code from docker.run()
|
||||||
return (docker_output, 0)
|
return (docker_output, 0)
|
||||||
|
|
|
||||||
|
|
@ -18,7 +18,6 @@ from pathlib import Path
|
||||||
|
|
||||||
|
|
||||||
class Deployer(ABC):
|
class Deployer(ABC):
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def up(self, detach, skip_cluster_management, services):
|
def up(self, detach, skip_cluster_management, services):
|
||||||
pass
|
pass
|
||||||
|
|
@ -52,7 +51,17 @@ class Deployer(ABC):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
|
def run(
|
||||||
|
self,
|
||||||
|
image: str,
|
||||||
|
command=None,
|
||||||
|
user=None,
|
||||||
|
volumes=None,
|
||||||
|
entrypoint=None,
|
||||||
|
env={},
|
||||||
|
ports=[],
|
||||||
|
detach=False,
|
||||||
|
):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
|
|
@ -66,7 +75,6 @@ class DeployerException(Exception):
|
||||||
|
|
||||||
|
|
||||||
class DeployerConfigGenerator(ABC):
|
class DeployerConfigGenerator(ABC):
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def generate(self, deployment_dir: Path):
|
def generate(self, deployment_dir: Path):
|
||||||
pass
|
pass
|
||||||
|
|
|
||||||
|
|
@ -14,8 +14,14 @@
|
||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
from stack_orchestrator import constants
|
from stack_orchestrator import constants
|
||||||
from stack_orchestrator.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerConfigGenerator
|
from stack_orchestrator.deploy.k8s.deploy_k8s import (
|
||||||
from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator
|
K8sDeployer,
|
||||||
|
K8sDeployerConfigGenerator,
|
||||||
|
)
|
||||||
|
from stack_orchestrator.deploy.compose.deploy_docker import (
|
||||||
|
DockerDeployer,
|
||||||
|
DockerDeployerConfigGenerator,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def getDeployerConfigGenerator(type: str, deployment_context):
|
def getDeployerConfigGenerator(type: str, deployment_context):
|
||||||
|
|
@ -27,10 +33,27 @@ def getDeployerConfigGenerator(type: str, deployment_context):
|
||||||
print(f"ERROR: deploy-to {type} is not valid")
|
print(f"ERROR: deploy-to {type} is not valid")
|
||||||
|
|
||||||
|
|
||||||
def getDeployer(type: str, deployment_context, compose_files, compose_project_name, compose_env_file):
|
def getDeployer(
|
||||||
|
type: str, deployment_context, compose_files, compose_project_name, compose_env_file
|
||||||
|
):
|
||||||
if type == "compose" or type is None:
|
if type == "compose" or type is None:
|
||||||
return DockerDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file)
|
return DockerDeployer(
|
||||||
elif type == type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type:
|
type,
|
||||||
return K8sDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file)
|
deployment_context,
|
||||||
|
compose_files,
|
||||||
|
compose_project_name,
|
||||||
|
compose_env_file,
|
||||||
|
)
|
||||||
|
elif (
|
||||||
|
type == type == constants.k8s_deploy_type
|
||||||
|
or type == constants.k8s_kind_deploy_type
|
||||||
|
):
|
||||||
|
return K8sDeployer(
|
||||||
|
type,
|
||||||
|
deployment_context,
|
||||||
|
compose_files,
|
||||||
|
compose_project_name,
|
||||||
|
compose_env_file,
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
print(f"ERROR: deploy-to {type} is not valid")
|
print(f"ERROR: deploy-to {type} is not valid")
|
||||||
|
|
|
||||||
|
|
@ -18,8 +18,19 @@ from pathlib import Path
|
||||||
import sys
|
import sys
|
||||||
from stack_orchestrator import constants
|
from stack_orchestrator import constants
|
||||||
from stack_orchestrator.deploy.images import push_images_operation
|
from stack_orchestrator.deploy.images import push_images_operation
|
||||||
from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation, status_operation
|
from stack_orchestrator.deploy.deploy import (
|
||||||
from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context, update_operation
|
up_operation,
|
||||||
|
down_operation,
|
||||||
|
ps_operation,
|
||||||
|
port_operation,
|
||||||
|
status_operation,
|
||||||
|
)
|
||||||
|
from stack_orchestrator.deploy.deploy import (
|
||||||
|
exec_operation,
|
||||||
|
logs_operation,
|
||||||
|
create_deploy_context,
|
||||||
|
update_operation,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy.deploy_types import DeployCommandContext
|
from stack_orchestrator.deploy.deploy_types import DeployCommandContext
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
|
|
||||||
|
|
@ -28,7 +39,7 @@ from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
@click.option("--dir", required=True, help="path to deployment directory")
|
@click.option("--dir", required=True, help="path to deployment directory")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, dir):
|
def command(ctx, dir):
|
||||||
'''manage a deployment'''
|
"""manage a deployment"""
|
||||||
|
|
||||||
# Check that --stack wasn't supplied
|
# Check that --stack wasn't supplied
|
||||||
if ctx.parent.obj.stack:
|
if ctx.parent.obj.stack:
|
||||||
|
|
@ -40,7 +51,10 @@ def command(ctx, dir):
|
||||||
print(f"Error: deployment directory {dir} does not exist")
|
print(f"Error: deployment directory {dir} does not exist")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
if not dir_path.is_dir():
|
if not dir_path.is_dir():
|
||||||
print(f"Error: supplied deployment directory path {dir} exists but is a file not a directory")
|
print(
|
||||||
|
f"Error: supplied deployment directory path {dir} exists but is a "
|
||||||
|
"file not a directory"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
# Store the deployment context for subcommands
|
# Store the deployment context for subcommands
|
||||||
deployment_context = DeploymentContext()
|
deployment_context = DeploymentContext()
|
||||||
|
|
@ -57,16 +71,31 @@ def make_deploy_context(ctx) -> DeployCommandContext:
|
||||||
else:
|
else:
|
||||||
deployment_type = constants.compose_deploy_type
|
deployment_type = constants.compose_deploy_type
|
||||||
stack = context.deployment_dir
|
stack = context.deployment_dir
|
||||||
return create_deploy_context(ctx.parent.parent.obj, context, stack, None, None,
|
return create_deploy_context(
|
||||||
cluster_name, env_file, deployment_type)
|
ctx.parent.parent.obj,
|
||||||
|
context,
|
||||||
|
stack,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
cluster_name,
|
||||||
|
env_file,
|
||||||
|
deployment_type,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# TODO: remove legacy up command since it's an alias for start
|
# TODO: remove legacy up command since it's an alias for start
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout")
|
@click.option(
|
||||||
@click.option("--skip-cluster-management/--perform-cluster-management",
|
"--stay-attached/--detatch-terminal",
|
||||||
default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)")
|
default=False,
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
|
help="detatch or not to see container stdout",
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--skip-cluster-management/--perform-cluster-management",
|
||||||
|
default=False,
|
||||||
|
help="Skip cluster initialization/tear-down (only for kind-k8s deployments)",
|
||||||
|
)
|
||||||
|
@click.argument("extra_args", nargs=-1) # help: command: up <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def up(ctx, stay_attached, skip_cluster_management, extra_args):
|
def up(ctx, stay_attached, skip_cluster_management, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
|
|
@ -76,10 +105,17 @@ def up(ctx, stay_attached, skip_cluster_management, extra_args):
|
||||||
|
|
||||||
# start is the preferred alias for up
|
# start is the preferred alias for up
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout")
|
@click.option(
|
||||||
@click.option("--skip-cluster-management/--perform-cluster-management",
|
"--stay-attached/--detatch-terminal",
|
||||||
default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)")
|
default=False,
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
|
help="detatch or not to see container stdout",
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--skip-cluster-management/--perform-cluster-management",
|
||||||
|
default=False,
|
||||||
|
help="Skip cluster initialization/tear-down (only for kind-k8s deployments)",
|
||||||
|
)
|
||||||
|
@click.argument("extra_args", nargs=-1) # help: command: up <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def start(ctx, stay_attached, skip_cluster_management, extra_args):
|
def start(ctx, stay_attached, skip_cluster_management, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
|
|
@ -89,10 +125,15 @@ def start(ctx, stay_attached, skip_cluster_management, extra_args):
|
||||||
|
|
||||||
# TODO: remove legacy up command since it's an alias for stop
|
# TODO: remove legacy up command since it's an alias for stop
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
|
@click.option(
|
||||||
@click.option("--skip-cluster-management/--perform-cluster-management",
|
"--delete-volumes/--preserve-volumes", default=False, help="delete data volumes"
|
||||||
default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)")
|
)
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: down <service1> <service2>
|
@click.option(
|
||||||
|
"--skip-cluster-management/--perform-cluster-management",
|
||||||
|
default=False,
|
||||||
|
help="Skip cluster initialization/tear-down (only for kind-k8s deployments)",
|
||||||
|
)
|
||||||
|
@click.argument("extra_args", nargs=-1) # help: command: down <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def down(ctx, delete_volumes, skip_cluster_management, extra_args):
|
def down(ctx, delete_volumes, skip_cluster_management, extra_args):
|
||||||
# Get the stack config file name
|
# Get the stack config file name
|
||||||
|
|
@ -103,10 +144,15 @@ def down(ctx, delete_volumes, skip_cluster_management, extra_args):
|
||||||
|
|
||||||
# stop is the preferred alias for down
|
# stop is the preferred alias for down
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
|
@click.option(
|
||||||
@click.option("--skip-cluster-management/--perform-cluster-management",
|
"--delete-volumes/--preserve-volumes", default=False, help="delete data volumes"
|
||||||
default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)")
|
)
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: down <service1> <service2>
|
@click.option(
|
||||||
|
"--skip-cluster-management/--perform-cluster-management",
|
||||||
|
default=False,
|
||||||
|
help="Skip cluster initialization/tear-down (only for kind-k8s deployments)",
|
||||||
|
)
|
||||||
|
@click.argument("extra_args", nargs=-1) # help: command: down <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def stop(ctx, delete_volumes, skip_cluster_management, extra_args):
|
def stop(ctx, delete_volumes, skip_cluster_management, extra_args):
|
||||||
# TODO: add cluster name and env file here
|
# TODO: add cluster name and env file here
|
||||||
|
|
@ -130,7 +176,7 @@ def push_images(ctx):
|
||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: port <service1> <service2>
|
@click.argument("extra_args", nargs=-1) # help: command: port <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def port(ctx, extra_args):
|
def port(ctx, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
|
|
@ -138,7 +184,7 @@ def port(ctx, extra_args):
|
||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: exec <service> <command>
|
@click.argument("extra_args", nargs=-1) # help: command: exec <service> <command>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def exec(ctx, extra_args):
|
def exec(ctx, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
|
|
@ -148,7 +194,7 @@ def exec(ctx, extra_args):
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--tail", "-n", default=None, help="number of lines to display")
|
@click.option("--tail", "-n", default=None, help="number of lines to display")
|
||||||
@click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
|
@click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: logs <service1> <service2>
|
@click.argument("extra_args", nargs=-1) # help: command: logs <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def logs(ctx, tail, follow, extra_args):
|
def logs(ctx, tail, follow, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
|
|
@ -170,11 +216,15 @@ def update(ctx):
|
||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument('job_name')
|
@click.argument("job_name")
|
||||||
@click.option('--helm-release', help='Helm release name (only for k8s helm chart deployments, defaults to chart name)')
|
@click.option(
|
||||||
|
"--helm-release",
|
||||||
|
help="Helm release name (for k8s helm chart deployments, defaults to chart name)",
|
||||||
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def run_job(ctx, job_name, helm_release):
|
def run_job(ctx, job_name, helm_release):
|
||||||
'''run a one-time job from the stack'''
|
"""run a one-time job from the stack"""
|
||||||
from stack_orchestrator.deploy.deploy import run_job_operation
|
from stack_orchestrator.deploy.deploy import run_job_operation
|
||||||
|
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
run_job_operation(ctx, job_name, helm_release)
|
run_job_operation(ctx, job_name, helm_release)
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,3 @@
|
||||||
|
|
||||||
# Copyright © 2022, 2023 Vulcanize
|
# Copyright © 2022, 2023 Vulcanize
|
||||||
|
|
||||||
# This program is free software: you can redistribute it and/or modify
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
|
|
||||||
|
|
@ -24,10 +24,23 @@ from secrets import token_hex
|
||||||
import sys
|
import sys
|
||||||
from stack_orchestrator import constants
|
from stack_orchestrator import constants
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
from stack_orchestrator.util import (get_stack_path, get_parsed_deployment_spec, get_parsed_stack_config,
|
from stack_orchestrator.util import (
|
||||||
global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts,
|
get_stack_path,
|
||||||
get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file,
|
get_parsed_deployment_spec,
|
||||||
resolve_config_dir, get_job_list, get_job_file_path)
|
get_parsed_stack_config,
|
||||||
|
global_options,
|
||||||
|
get_yaml,
|
||||||
|
get_pod_list,
|
||||||
|
get_pod_file_path,
|
||||||
|
pod_has_scripts,
|
||||||
|
get_pod_script_paths,
|
||||||
|
get_plugin_code_paths,
|
||||||
|
error_exit,
|
||||||
|
env_var_map_from_file,
|
||||||
|
resolve_config_dir,
|
||||||
|
get_job_list,
|
||||||
|
get_job_file_path,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy.spec import Spec
|
from stack_orchestrator.deploy.spec import Spec
|
||||||
from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
|
from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
|
||||||
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
|
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
|
||||||
|
|
@ -49,17 +62,15 @@ def _get_ports(stack):
|
||||||
if "services" in parsed_pod_file:
|
if "services" in parsed_pod_file:
|
||||||
for svc_name, svc in parsed_pod_file["services"].items():
|
for svc_name, svc in parsed_pod_file["services"].items():
|
||||||
if "ports" in svc:
|
if "ports" in svc:
|
||||||
# Ports can appear as strings or numbers. We normalize them as strings.
|
# Ports can appear as strings or numbers. We normalize them as
|
||||||
|
# strings.
|
||||||
ports[svc_name] = [str(x) for x in svc["ports"]]
|
ports[svc_name] = [str(x) for x in svc["ports"]]
|
||||||
return ports
|
return ports
|
||||||
|
|
||||||
|
|
||||||
def _get_named_volumes(stack):
|
def _get_named_volumes(stack):
|
||||||
# Parse the compose files looking for named volumes
|
# Parse the compose files looking for named volumes
|
||||||
named_volumes = {
|
named_volumes = {"rw": [], "ro": []}
|
||||||
"rw": [],
|
|
||||||
"ro": []
|
|
||||||
}
|
|
||||||
parsed_stack = get_parsed_stack_config(stack)
|
parsed_stack = get_parsed_stack_config(stack)
|
||||||
pods = get_pod_list(parsed_stack)
|
pods = get_pod_list(parsed_stack)
|
||||||
yaml = get_yaml()
|
yaml = get_yaml()
|
||||||
|
|
@ -75,7 +86,7 @@ def _get_named_volumes(stack):
|
||||||
ret[svc_name] = {
|
ret[svc_name] = {
|
||||||
"volume": parts[0],
|
"volume": parts[0],
|
||||||
"mount": parts[1],
|
"mount": parts[1],
|
||||||
"options": parts[2] if len(parts) == 3 else None
|
"options": parts[2] if len(parts) == 3 else None,
|
||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
|
@ -88,7 +99,10 @@ def _get_named_volumes(stack):
|
||||||
for vu in find_vol_usage(parsed_pod_file, volume).values():
|
for vu in find_vol_usage(parsed_pod_file, volume).values():
|
||||||
read_only = vu["options"] == "ro"
|
read_only = vu["options"] == "ro"
|
||||||
if read_only:
|
if read_only:
|
||||||
if vu["volume"] not in named_volumes["rw"] and vu["volume"] not in named_volumes["ro"]:
|
if (
|
||||||
|
vu["volume"] not in named_volumes["rw"]
|
||||||
|
and vu["volume"] not in named_volumes["ro"]
|
||||||
|
):
|
||||||
named_volumes["ro"].append(vu["volume"])
|
named_volumes["ro"].append(vu["volume"])
|
||||||
else:
|
else:
|
||||||
if vu["volume"] not in named_volumes["rw"]:
|
if vu["volume"] not in named_volumes["rw"]:
|
||||||
|
|
@ -108,10 +122,13 @@ def _create_bind_dir_if_relative(volume, path_string, compose_dir):
|
||||||
absolute_path.mkdir(parents=True, exist_ok=True)
|
absolute_path.mkdir(parents=True, exist_ok=True)
|
||||||
else:
|
else:
|
||||||
if not path.exists():
|
if not path.exists():
|
||||||
print(f"WARNING: mount path for volume {volume} does not exist: {path_string}")
|
print(
|
||||||
|
f"WARNING: mount path for volume {volume} does not exist: {path_string}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
|
# See:
|
||||||
|
# https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
|
||||||
def _fixup_pod_file(pod, spec, compose_dir):
|
def _fixup_pod_file(pod, spec, compose_dir):
|
||||||
deployment_type = spec[constants.deploy_to_key]
|
deployment_type = spec[constants.deploy_to_key]
|
||||||
# Fix up volumes
|
# Fix up volumes
|
||||||
|
|
@ -123,7 +140,11 @@ def _fixup_pod_file(pod, spec, compose_dir):
|
||||||
if volume in spec_volumes:
|
if volume in spec_volumes:
|
||||||
volume_spec = spec_volumes[volume]
|
volume_spec = spec_volumes[volume]
|
||||||
if volume_spec:
|
if volume_spec:
|
||||||
volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
|
volume_spec_fixedup = (
|
||||||
|
volume_spec
|
||||||
|
if Path(volume_spec).is_absolute()
|
||||||
|
else f".{volume_spec}"
|
||||||
|
)
|
||||||
_create_bind_dir_if_relative(volume, volume_spec, compose_dir)
|
_create_bind_dir_if_relative(volume, volume_spec, compose_dir)
|
||||||
# this is Docker specific
|
# this is Docker specific
|
||||||
if spec.is_docker_deployment():
|
if spec.is_docker_deployment():
|
||||||
|
|
@ -132,8 +153,8 @@ def _fixup_pod_file(pod, spec, compose_dir):
|
||||||
"driver_opts": {
|
"driver_opts": {
|
||||||
"type": "none",
|
"type": "none",
|
||||||
"device": volume_spec_fixedup,
|
"device": volume_spec_fixedup,
|
||||||
"o": "bind"
|
"o": "bind",
|
||||||
}
|
},
|
||||||
}
|
}
|
||||||
pod["volumes"][volume] = new_volume_spec
|
pod["volumes"][volume] = new_volume_spec
|
||||||
|
|
||||||
|
|
@ -189,12 +210,17 @@ def call_stack_deploy_init(deploy_command_context):
|
||||||
init_done = True
|
init_done = True
|
||||||
else:
|
else:
|
||||||
# TODO: remove this restriction
|
# TODO: remove this restriction
|
||||||
print(f"Skipping init() from plugin {python_file_path}. Only one init() is allowed.")
|
print(
|
||||||
|
f"Skipping init() from plugin {python_file_path}. "
|
||||||
|
"Only one init() is allowed."
|
||||||
|
)
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
|
||||||
# TODO: fold this with function above
|
# TODO: fold this with function above
|
||||||
def call_stack_deploy_setup(deploy_command_context, parameters: LaconicStackSetupCommand, extra_args):
|
def call_stack_deploy_setup(
|
||||||
|
deploy_command_context, parameters: LaconicStackSetupCommand, extra_args
|
||||||
|
):
|
||||||
# Link with the python file in the stack
|
# Link with the python file in the stack
|
||||||
# Call a function in it
|
# Call a function in it
|
||||||
# If no function found, return None
|
# If no function found, return None
|
||||||
|
|
@ -247,7 +273,13 @@ def _find_extra_config_dirs(parsed_pod_file, pod):
|
||||||
|
|
||||||
|
|
||||||
def _get_mapped_ports(stack: str, map_recipe: str):
|
def _get_mapped_ports(stack: str, map_recipe: str):
|
||||||
port_map_recipes = ["any-variable-random", "localhost-same", "any-same", "localhost-fixed-random", "any-fixed-random"]
|
port_map_recipes = [
|
||||||
|
"any-variable-random",
|
||||||
|
"localhost-same",
|
||||||
|
"any-same",
|
||||||
|
"localhost-fixed-random",
|
||||||
|
"any-fixed-random",
|
||||||
|
]
|
||||||
ports = _get_ports(stack)
|
ports = _get_ports(stack)
|
||||||
if ports:
|
if ports:
|
||||||
# Implement any requested mapping recipe
|
# Implement any requested mapping recipe
|
||||||
|
|
@ -259,7 +291,9 @@ def _get_mapped_ports(stack: str, map_recipe: str):
|
||||||
orig_port = ports_array[x]
|
orig_port = ports_array[x]
|
||||||
# Strip /udp suffix if present
|
# Strip /udp suffix if present
|
||||||
bare_orig_port = orig_port.replace("/udp", "")
|
bare_orig_port = orig_port.replace("/udp", "")
|
||||||
random_port = random.randint(20000, 50000) # Beware: we're relying on luck to not collide
|
random_port = random.randint(
|
||||||
|
20000, 50000
|
||||||
|
) # Beware: we're relying on luck to not collide
|
||||||
if map_recipe == "any-variable-random":
|
if map_recipe == "any-variable-random":
|
||||||
# This is the default so take no action
|
# This is the default so take no action
|
||||||
pass
|
pass
|
||||||
|
|
@ -278,7 +312,10 @@ def _get_mapped_ports(stack: str, map_recipe: str):
|
||||||
else:
|
else:
|
||||||
print("Error: bad map_recipe")
|
print("Error: bad map_recipe")
|
||||||
else:
|
else:
|
||||||
print(f"Error: --map-ports-to-host must specify one of: {port_map_recipes}")
|
print(
|
||||||
|
f"Error: --map-ports-to-host must specify one of: "
|
||||||
|
f"{port_map_recipes}"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
return ports
|
return ports
|
||||||
|
|
||||||
|
|
@ -303,33 +340,54 @@ def _parse_config_variables(variable_values: str):
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--config", help="Provide config variables for the deployment")
|
@click.option("--config", help="Provide config variables for the deployment")
|
||||||
@click.option("--config-file", help="Provide config variables in a file for the deployment")
|
@click.option(
|
||||||
|
"--config-file", help="Provide config variables in a file for the deployment"
|
||||||
|
)
|
||||||
@click.option("--kube-config", help="Provide a config file for a k8s deployment")
|
@click.option("--kube-config", help="Provide a config file for a k8s deployment")
|
||||||
@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster")
|
@click.option(
|
||||||
|
"--image-registry",
|
||||||
|
help="Provide a container image registry url for this k8s cluster",
|
||||||
|
)
|
||||||
@click.option("--output", required=True, help="Write yaml spec file here")
|
@click.option("--output", required=True, help="Write yaml spec file here")
|
||||||
@click.option("--map-ports-to-host", required=False,
|
@click.option(
|
||||||
|
"--map-ports-to-host",
|
||||||
|
required=False,
|
||||||
help="Map ports to the host as one of: any-variable-random (default), "
|
help="Map ports to the host as one of: any-variable-random (default), "
|
||||||
"localhost-same, any-same, localhost-fixed-random, any-fixed-random")
|
"localhost-same, any-same, localhost-fixed-random, any-fixed-random",
|
||||||
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def init(ctx, config, config_file, kube_config, image_registry, output, map_ports_to_host):
|
def init(
|
||||||
|
ctx, config, config_file, kube_config, image_registry, output, map_ports_to_host
|
||||||
|
):
|
||||||
stack = global_options(ctx).stack
|
stack = global_options(ctx).stack
|
||||||
deployer_type = ctx.obj.deployer.type
|
deployer_type = ctx.obj.deployer.type
|
||||||
deploy_command_context = ctx.obj
|
deploy_command_context = ctx.obj
|
||||||
return init_operation(
|
return init_operation(
|
||||||
deploy_command_context,
|
deploy_command_context,
|
||||||
stack, deployer_type,
|
stack,
|
||||||
config, config_file,
|
deployer_type,
|
||||||
|
config,
|
||||||
|
config_file,
|
||||||
kube_config,
|
kube_config,
|
||||||
image_registry,
|
image_registry,
|
||||||
output,
|
output,
|
||||||
map_ports_to_host)
|
map_ports_to_host,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# The init command's implementation is in a separate function so that we can
|
# The init command's implementation is in a separate function so that we can
|
||||||
# call it from other commands, bypassing the click decoration stuff
|
# call it from other commands, bypassing the click decoration stuff
|
||||||
def init_operation(deploy_command_context, stack, deployer_type, config,
|
def init_operation(
|
||||||
config_file, kube_config, image_registry, output, map_ports_to_host):
|
deploy_command_context,
|
||||||
|
stack,
|
||||||
|
deployer_type,
|
||||||
|
config,
|
||||||
|
config_file,
|
||||||
|
kube_config,
|
||||||
|
image_registry,
|
||||||
|
output,
|
||||||
|
map_ports_to_host,
|
||||||
|
):
|
||||||
default_spec_file_content = call_stack_deploy_init(deploy_command_context)
|
default_spec_file_content = call_stack_deploy_init(deploy_command_context)
|
||||||
spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type}
|
spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type}
|
||||||
if deployer_type == "k8s":
|
if deployer_type == "k8s":
|
||||||
|
|
@ -340,13 +398,20 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
|
||||||
if image_registry:
|
if image_registry:
|
||||||
spec_file_content.update({constants.image_registry_key: image_registry})
|
spec_file_content.update({constants.image_registry_key: image_registry})
|
||||||
else:
|
else:
|
||||||
print("WARNING: --image-registry not specified, only default container registries (eg, Docker Hub) will be available")
|
print(
|
||||||
|
"WARNING: --image-registry not specified, only default container "
|
||||||
|
"registries (eg, Docker Hub) will be available"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
# Check for --kube-config supplied for non-relevant deployer types
|
# Check for --kube-config supplied for non-relevant deployer types
|
||||||
if kube_config is not None:
|
if kube_config is not None:
|
||||||
error_exit(f"--kube-config is not allowed with a {deployer_type} deployment")
|
error_exit(
|
||||||
|
f"--kube-config is not allowed with a {deployer_type} deployment"
|
||||||
|
)
|
||||||
if image_registry is not None:
|
if image_registry is not None:
|
||||||
error_exit(f"--image-registry is not allowed with a {deployer_type} deployment")
|
error_exit(
|
||||||
|
f"--image-registry is not allowed with a {deployer_type} deployment"
|
||||||
|
)
|
||||||
if default_spec_file_content:
|
if default_spec_file_content:
|
||||||
spec_file_content.update(default_spec_file_content)
|
spec_file_content.update(default_spec_file_content)
|
||||||
config_variables = _parse_config_variables(config)
|
config_variables = _parse_config_variables(config)
|
||||||
|
|
@ -395,7 +460,9 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
|
||||||
spec_file_content["configmaps"] = configmap_descriptors
|
spec_file_content["configmaps"] = configmap_descriptors
|
||||||
|
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Creating spec file for stack: {stack} with content: {spec_file_content}")
|
print(
|
||||||
|
f"Creating spec file for stack: {stack} with content: {spec_file_content}"
|
||||||
|
)
|
||||||
|
|
||||||
with open(output, "w") as output_file:
|
with open(output, "w") as output_file:
|
||||||
get_yaml().dump(spec_file_content, output_file)
|
get_yaml().dump(spec_file_content, output_file)
|
||||||
|
|
@ -443,22 +510,45 @@ def _check_volume_definitions(spec):
|
||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
|
@click.option(
|
||||||
|
"--spec-file", required=True, help="Spec file to use to create this deployment"
|
||||||
|
)
|
||||||
@click.option("--deployment-dir", help="Create deployment files in this directory")
|
@click.option("--deployment-dir", help="Create deployment files in this directory")
|
||||||
@click.option("--helm-chart", is_flag=True, default=False, help="Generate Helm chart instead of deploying (k8s only)")
|
@click.option(
|
||||||
|
"--helm-chart",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Generate Helm chart instead of deploying (k8s only)",
|
||||||
|
)
|
||||||
# TODO: Hack
|
# TODO: Hack
|
||||||
@click.option("--network-dir", help="Network configuration supplied in this directory")
|
@click.option("--network-dir", help="Network configuration supplied in this directory")
|
||||||
@click.option("--initial-peers", help="Initial set of persistent peers")
|
@click.option("--initial-peers", help="Initial set of persistent peers")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def create(ctx, spec_file, deployment_dir, helm_chart, network_dir, initial_peers):
|
def create(ctx, spec_file, deployment_dir, helm_chart, network_dir, initial_peers):
|
||||||
deployment_command_context = ctx.obj
|
deployment_command_context = ctx.obj
|
||||||
return create_operation(deployment_command_context, spec_file, deployment_dir, helm_chart, network_dir, initial_peers)
|
return create_operation(
|
||||||
|
deployment_command_context,
|
||||||
|
spec_file,
|
||||||
|
deployment_dir,
|
||||||
|
helm_chart,
|
||||||
|
network_dir,
|
||||||
|
initial_peers,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# The init command's implementation is in a separate function so that we can
|
# The init command's implementation is in a separate function so that we can
|
||||||
# call it from other commands, bypassing the click decoration stuff
|
# call it from other commands, bypassing the click decoration stuff
|
||||||
def create_operation(deployment_command_context, spec_file, deployment_dir, helm_chart, network_dir, initial_peers):
|
def create_operation(
|
||||||
parsed_spec = Spec(os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file))
|
deployment_command_context,
|
||||||
|
spec_file,
|
||||||
|
deployment_dir,
|
||||||
|
helm_chart,
|
||||||
|
network_dir,
|
||||||
|
initial_peers,
|
||||||
|
):
|
||||||
|
parsed_spec = Spec(
|
||||||
|
os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file)
|
||||||
|
)
|
||||||
_check_volume_definitions(parsed_spec)
|
_check_volume_definitions(parsed_spec)
|
||||||
stack_name = parsed_spec["stack"]
|
stack_name = parsed_spec["stack"]
|
||||||
deployment_type = parsed_spec[constants.deploy_to_key]
|
deployment_type = parsed_spec[constants.deploy_to_key]
|
||||||
|
|
@ -483,17 +573,24 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm
|
||||||
|
|
||||||
# Branch to Helm chart generation flow if --helm-chart flag is set
|
# Branch to Helm chart generation flow if --helm-chart flag is set
|
||||||
if deployment_type == "k8s" and helm_chart:
|
if deployment_type == "k8s" and helm_chart:
|
||||||
from stack_orchestrator.deploy.k8s.helm.chart_generator import generate_helm_chart
|
from stack_orchestrator.deploy.k8s.helm.chart_generator import (
|
||||||
|
generate_helm_chart,
|
||||||
|
)
|
||||||
|
|
||||||
generate_helm_chart(stack_name, spec_file, deployment_dir_path)
|
generate_helm_chart(stack_name, spec_file, deployment_dir_path)
|
||||||
return # Exit early for helm chart generation
|
return # Exit early for helm chart generation
|
||||||
|
|
||||||
# Existing deployment flow continues unchanged
|
# Existing deployment flow continues unchanged
|
||||||
# Copy any config varibles from the spec file into an env file suitable for compose
|
# Copy any config varibles from the spec file into an env file suitable for compose
|
||||||
_write_config_file(spec_file, deployment_dir_path.joinpath(constants.config_file_name))
|
_write_config_file(
|
||||||
|
spec_file, deployment_dir_path.joinpath(constants.config_file_name)
|
||||||
|
)
|
||||||
# Copy any k8s config file into the deployment dir
|
# Copy any k8s config file into the deployment dir
|
||||||
if deployment_type == "k8s":
|
if deployment_type == "k8s":
|
||||||
_write_kube_config_file(Path(parsed_spec[constants.kube_config_key]),
|
_write_kube_config_file(
|
||||||
deployment_dir_path.joinpath(constants.kube_config_filename))
|
Path(parsed_spec[constants.kube_config_key]),
|
||||||
|
deployment_dir_path.joinpath(constants.kube_config_filename),
|
||||||
|
)
|
||||||
# Copy the pod files into the deployment dir, fixing up content
|
# Copy the pod files into the deployment dir, fixing up content
|
||||||
pods = get_pod_list(parsed_stack)
|
pods = get_pod_list(parsed_stack)
|
||||||
destination_compose_dir = deployment_dir_path.joinpath("compose")
|
destination_compose_dir = deployment_dir_path.joinpath("compose")
|
||||||
|
|
@ -510,7 +607,9 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"extra config dirs: {extra_config_dirs}")
|
print(f"extra config dirs: {extra_config_dirs}")
|
||||||
_fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
|
_fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
|
||||||
with open(destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w") as output_file:
|
with open(
|
||||||
|
destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w"
|
||||||
|
) as output_file:
|
||||||
yaml.dump(parsed_pod_file, output_file)
|
yaml.dump(parsed_pod_file, output_file)
|
||||||
# Copy the config files for the pod, if any
|
# Copy the config files for the pod, if any
|
||||||
config_dirs = {pod}
|
config_dirs = {pod}
|
||||||
|
|
@ -518,8 +617,11 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm
|
||||||
for config_dir in config_dirs:
|
for config_dir in config_dirs:
|
||||||
source_config_dir = resolve_config_dir(stack_name, config_dir)
|
source_config_dir = resolve_config_dir(stack_name, config_dir)
|
||||||
if os.path.exists(source_config_dir):
|
if os.path.exists(source_config_dir):
|
||||||
destination_config_dir = deployment_dir_path.joinpath("config", config_dir)
|
destination_config_dir = deployment_dir_path.joinpath(
|
||||||
# If the same config dir appears in multiple pods, it may already have been copied
|
"config", config_dir
|
||||||
|
)
|
||||||
|
# If the same config dir appears in multiple pods, it may already have
|
||||||
|
# been copied
|
||||||
if not os.path.exists(destination_config_dir):
|
if not os.path.exists(destination_config_dir):
|
||||||
copytree(source_config_dir, destination_config_dir)
|
copytree(source_config_dir, destination_config_dir)
|
||||||
# Copy the script files for the pod, if any
|
# Copy the script files for the pod, if any
|
||||||
|
|
@ -532,8 +634,12 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm
|
||||||
for configmap in parsed_spec.get_configmaps():
|
for configmap in parsed_spec.get_configmaps():
|
||||||
source_config_dir = resolve_config_dir(stack_name, configmap)
|
source_config_dir = resolve_config_dir(stack_name, configmap)
|
||||||
if os.path.exists(source_config_dir):
|
if os.path.exists(source_config_dir):
|
||||||
destination_config_dir = deployment_dir_path.joinpath("configmaps", configmap)
|
destination_config_dir = deployment_dir_path.joinpath(
|
||||||
copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
|
"configmaps", configmap
|
||||||
|
)
|
||||||
|
copytree(
|
||||||
|
source_config_dir, destination_config_dir, dirs_exist_ok=True
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
# TODO: We should probably only do this if the volume is marked :ro.
|
# TODO: We should probably only do this if the volume is marked :ro.
|
||||||
for volume_name, volume_path in parsed_spec.get_volumes().items():
|
for volume_name, volume_path in parsed_spec.get_volumes().items():
|
||||||
|
|
@ -542,8 +648,14 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm
|
||||||
if os.path.exists(source_config_dir) and os.listdir(source_config_dir):
|
if os.path.exists(source_config_dir) and os.listdir(source_config_dir):
|
||||||
destination_config_dir = deployment_dir_path.joinpath(volume_path)
|
destination_config_dir = deployment_dir_path.joinpath(volume_path)
|
||||||
# Only copy if the destination exists and _is_ empty.
|
# Only copy if the destination exists and _is_ empty.
|
||||||
if os.path.exists(destination_config_dir) and not os.listdir(destination_config_dir):
|
if os.path.exists(destination_config_dir) and not os.listdir(
|
||||||
copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
|
destination_config_dir
|
||||||
|
):
|
||||||
|
copytree(
|
||||||
|
source_config_dir,
|
||||||
|
destination_config_dir,
|
||||||
|
dirs_exist_ok=True,
|
||||||
|
)
|
||||||
|
|
||||||
# Copy the job files into the deployment dir (for Docker deployments)
|
# Copy the job files into the deployment dir (for Docker deployments)
|
||||||
jobs = get_job_list(parsed_stack)
|
jobs = get_job_list(parsed_stack)
|
||||||
|
|
@ -555,22 +667,31 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm
|
||||||
if job_file_path and job_file_path.exists():
|
if job_file_path and job_file_path.exists():
|
||||||
parsed_job_file = yaml.load(open(job_file_path, "r"))
|
parsed_job_file = yaml.load(open(job_file_path, "r"))
|
||||||
_fixup_pod_file(parsed_job_file, parsed_spec, destination_compose_dir)
|
_fixup_pod_file(parsed_job_file, parsed_spec, destination_compose_dir)
|
||||||
with open(destination_compose_jobs_dir.joinpath("docker-compose-%s.yml" % job), "w") as output_file:
|
with open(
|
||||||
|
destination_compose_jobs_dir.joinpath(
|
||||||
|
"docker-compose-%s.yml" % job
|
||||||
|
),
|
||||||
|
"w",
|
||||||
|
) as output_file:
|
||||||
yaml.dump(parsed_job_file, output_file)
|
yaml.dump(parsed_job_file, output_file)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Copied job compose file: {job}")
|
print(f"Copied job compose file: {job}")
|
||||||
|
|
||||||
# Delegate to the stack's Python code
|
# Delegate to the stack's Python code
|
||||||
# The deploy create command doesn't require a --stack argument so we need to insert the
|
# The deploy create command doesn't require a --stack argument so we need
|
||||||
# stack member here.
|
# to insert the stack member here.
|
||||||
deployment_command_context.stack = stack_name
|
deployment_command_context.stack = stack_name
|
||||||
deployment_context = DeploymentContext()
|
deployment_context = DeploymentContext()
|
||||||
deployment_context.init(deployment_dir_path)
|
deployment_context.init(deployment_dir_path)
|
||||||
# Call the deployer to generate any deployer-specific files (e.g. for kind)
|
# Call the deployer to generate any deployer-specific files (e.g. for kind)
|
||||||
deployer_config_generator = getDeployerConfigGenerator(deployment_type, deployment_context)
|
deployer_config_generator = getDeployerConfigGenerator(
|
||||||
|
deployment_type, deployment_context
|
||||||
|
)
|
||||||
# TODO: make deployment_dir_path a Path above
|
# TODO: make deployment_dir_path a Path above
|
||||||
deployer_config_generator.generate(deployment_dir_path)
|
deployer_config_generator.generate(deployment_dir_path)
|
||||||
call_stack_deploy_create(deployment_context, [network_dir, initial_peers, deployment_command_context])
|
call_stack_deploy_create(
|
||||||
|
deployment_context, [network_dir, initial_peers, deployment_command_context]
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# TODO: this code should be in the stack .py files but
|
# TODO: this code should be in the stack .py files but
|
||||||
|
|
@ -580,18 +701,50 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, helm
|
||||||
@click.option("--node-moniker", help="Moniker for this node")
|
@click.option("--node-moniker", help="Moniker for this node")
|
||||||
@click.option("--chain-id", help="The new chain id")
|
@click.option("--chain-id", help="The new chain id")
|
||||||
@click.option("--key-name", help="Name for new node key")
|
@click.option("--key-name", help="Name for new node key")
|
||||||
@click.option("--gentx-files", help="List of comma-delimited gentx filenames from other nodes")
|
@click.option(
|
||||||
@click.option("--gentx-addresses", type=str, help="List of comma-delimited validator addresses for other nodes")
|
"--gentx-files", help="List of comma-delimited gentx filenames from other nodes"
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--gentx-addresses",
|
||||||
|
type=str,
|
||||||
|
help="List of comma-delimited validator addresses for other nodes",
|
||||||
|
)
|
||||||
@click.option("--genesis-file", help="Genesis file for the network")
|
@click.option("--genesis-file", help="Genesis file for the network")
|
||||||
@click.option("--initialize-network", is_flag=True, default=False, help="Initialize phase")
|
@click.option(
|
||||||
|
"--initialize-network", is_flag=True, default=False, help="Initialize phase"
|
||||||
|
)
|
||||||
@click.option("--join-network", is_flag=True, default=False, help="Join phase")
|
@click.option("--join-network", is_flag=True, default=False, help="Join phase")
|
||||||
@click.option("--connect-network", is_flag=True, default=False, help="Connect phase")
|
@click.option("--connect-network", is_flag=True, default=False, help="Connect phase")
|
||||||
@click.option("--create-network", is_flag=True, default=False, help="Create phase")
|
@click.option("--create-network", is_flag=True, default=False, help="Create phase")
|
||||||
@click.option("--network-dir", help="Directory for network files")
|
@click.option("--network-dir", help="Directory for network files")
|
||||||
@click.argument('extra_args', nargs=-1)
|
@click.argument("extra_args", nargs=-1)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def setup(ctx, node_moniker, chain_id, key_name, gentx_files, gentx_addresses, genesis_file, initialize_network, join_network,
|
def setup(
|
||||||
connect_network, create_network, network_dir, extra_args):
|
ctx,
|
||||||
parmeters = LaconicStackSetupCommand(chain_id, node_moniker, key_name, initialize_network, join_network, connect_network,
|
node_moniker,
|
||||||
create_network, gentx_files, gentx_addresses, genesis_file, network_dir)
|
chain_id,
|
||||||
|
key_name,
|
||||||
|
gentx_files,
|
||||||
|
gentx_addresses,
|
||||||
|
genesis_file,
|
||||||
|
initialize_network,
|
||||||
|
join_network,
|
||||||
|
connect_network,
|
||||||
|
create_network,
|
||||||
|
network_dir,
|
||||||
|
extra_args,
|
||||||
|
):
|
||||||
|
parmeters = LaconicStackSetupCommand(
|
||||||
|
chain_id,
|
||||||
|
node_moniker,
|
||||||
|
key_name,
|
||||||
|
initialize_network,
|
||||||
|
join_network,
|
||||||
|
connect_network,
|
||||||
|
create_network,
|
||||||
|
gentx_files,
|
||||||
|
gentx_addresses,
|
||||||
|
genesis_file,
|
||||||
|
network_dir,
|
||||||
|
)
|
||||||
call_stack_deploy_setup(ctx.obj, parmeters, extra_args)
|
call_stack_deploy_setup(ctx.obj, parmeters, extra_args)
|
||||||
|
|
|
||||||
|
|
@ -32,7 +32,9 @@ def _image_needs_pushed(image: str):
|
||||||
def _remote_tag_for_image(image: str, remote_repo_url: str):
|
def _remote_tag_for_image(image: str, remote_repo_url: str):
|
||||||
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
||||||
major_parts = image.split("/", 2)
|
major_parts = image.split("/", 2)
|
||||||
image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
|
image_name_with_version = (
|
||||||
|
major_parts[1] if 2 == len(major_parts) else major_parts[0]
|
||||||
|
)
|
||||||
(image_name, image_version) = image_name_with_version.split(":")
|
(image_name, image_version) = image_name_with_version.split(":")
|
||||||
if image_version == "local":
|
if image_version == "local":
|
||||||
return f"{remote_repo_url}/{image_name}:deploy"
|
return f"{remote_repo_url}/{image_name}:deploy"
|
||||||
|
|
@ -61,17 +63,22 @@ def add_tags_to_image(remote_repo_url: str, local_tag: str, *additional_tags):
|
||||||
|
|
||||||
docker = DockerClient()
|
docker = DockerClient()
|
||||||
remote_tag = _remote_tag_for_image(local_tag, remote_repo_url)
|
remote_tag = _remote_tag_for_image(local_tag, remote_repo_url)
|
||||||
new_remote_tags = [_remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags]
|
new_remote_tags = [
|
||||||
|
_remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags
|
||||||
|
]
|
||||||
docker.buildx.imagetools.create(sources=[remote_tag], tags=new_remote_tags)
|
docker.buildx.imagetools.create(sources=[remote_tag], tags=new_remote_tags)
|
||||||
|
|
||||||
|
|
||||||
def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: str):
|
def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: str):
|
||||||
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
||||||
major_parts = image.split("/", 2)
|
major_parts = image.split("/", 2)
|
||||||
image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
|
image_name_with_version = (
|
||||||
|
major_parts[1] if 2 == len(major_parts) else major_parts[0]
|
||||||
|
)
|
||||||
(image_name, image_version) = image_name_with_version.split(":")
|
(image_name, image_version) = image_name_with_version.split(":")
|
||||||
if image_version == "local":
|
if image_version == "local":
|
||||||
# Salt the tag with part of the deployment id to make it unique to this deployment
|
# Salt the tag with part of the deployment id to make it unique to this
|
||||||
|
# deployment
|
||||||
deployment_tag = deployment_id[-8:]
|
deployment_tag = deployment_id[-8:]
|
||||||
return f"{remote_repo_url}/{image_name}:deploy-{deployment_tag}"
|
return f"{remote_repo_url}/{image_name}:deploy-{deployment_tag}"
|
||||||
else:
|
else:
|
||||||
|
|
@ -79,7 +86,9 @@ def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id:
|
||||||
|
|
||||||
|
|
||||||
# TODO: needs lots of error handling
|
# TODO: needs lots of error handling
|
||||||
def push_images_operation(command_context: DeployCommandContext, deployment_context: DeploymentContext):
|
def push_images_operation(
|
||||||
|
command_context: DeployCommandContext, deployment_context: DeploymentContext
|
||||||
|
):
|
||||||
# Get the list of images for the stack
|
# Get the list of images for the stack
|
||||||
cluster_context = command_context.cluster_context
|
cluster_context = command_context.cluster_context
|
||||||
images: Set[str] = images_for_deployment(cluster_context.compose_files)
|
images: Set[str] = images_for_deployment(cluster_context.compose_files)
|
||||||
|
|
@ -88,14 +97,18 @@ def push_images_operation(command_context: DeployCommandContext, deployment_cont
|
||||||
docker = DockerClient()
|
docker = DockerClient()
|
||||||
for image in images:
|
for image in images:
|
||||||
if _image_needs_pushed(image):
|
if _image_needs_pushed(image):
|
||||||
remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id)
|
remote_tag = remote_tag_for_image_unique(
|
||||||
|
image, remote_repo_url, deployment_context.id
|
||||||
|
)
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Tagging {image} to {remote_tag}")
|
print(f"Tagging {image} to {remote_tag}")
|
||||||
docker.image.tag(image, remote_tag)
|
docker.image.tag(image, remote_tag)
|
||||||
# Run docker push commands to upload
|
# Run docker push commands to upload
|
||||||
for image in images:
|
for image in images:
|
||||||
if _image_needs_pushed(image):
|
if _image_needs_pushed(image):
|
||||||
remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id)
|
remote_tag = remote_tag_for_image_unique(
|
||||||
|
image, remote_repo_url, deployment_context.id
|
||||||
|
)
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Pushing image {remote_tag}")
|
print(f"Pushing image {remote_tag}")
|
||||||
docker.image.push(remote_tag)
|
docker.image.push(remote_tag)
|
||||||
|
|
|
||||||
|
|
@ -21,22 +21,33 @@ from typing import Any, List, Set
|
||||||
|
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
from stack_orchestrator.util import env_var_map_from_file
|
from stack_orchestrator.util import env_var_map_from_file
|
||||||
from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files
|
from stack_orchestrator.deploy.k8s.helpers import (
|
||||||
|
named_volumes_from_pod_files,
|
||||||
|
volume_mounts_for_service,
|
||||||
|
volumes_for_pod_files,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy.k8s.helpers import get_kind_pv_bind_mount_path
|
from stack_orchestrator.deploy.k8s.helpers import get_kind_pv_bind_mount_path
|
||||||
from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variables_map, envs_from_compose_file, merge_envs
|
from stack_orchestrator.deploy.k8s.helpers import (
|
||||||
from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment
|
envs_from_environment_variables_map,
|
||||||
|
envs_from_compose_file,
|
||||||
|
merge_envs,
|
||||||
|
)
|
||||||
|
from stack_orchestrator.deploy.deploy_util import (
|
||||||
|
parsed_pod_files_map_from_file_names,
|
||||||
|
images_for_deployment,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy.deploy_types import DeployEnvVars
|
from stack_orchestrator.deploy.deploy_types import DeployEnvVars
|
||||||
from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits
|
from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits
|
||||||
from stack_orchestrator.deploy.images import remote_tag_for_image_unique
|
from stack_orchestrator.deploy.images import remote_tag_for_image_unique
|
||||||
|
|
||||||
DEFAULT_VOLUME_RESOURCES = Resources({
|
DEFAULT_VOLUME_RESOURCES = Resources({"reservations": {"storage": "2Gi"}})
|
||||||
"reservations": {"storage": "2Gi"}
|
|
||||||
})
|
|
||||||
|
|
||||||
DEFAULT_CONTAINER_RESOURCES = Resources({
|
DEFAULT_CONTAINER_RESOURCES = Resources(
|
||||||
|
{
|
||||||
"reservations": {"cpus": "1.0", "memory": "2000M"},
|
"reservations": {"cpus": "1.0", "memory": "2000M"},
|
||||||
"limits": {"cpus": "4.0", "memory": "8000M"},
|
"limits": {"cpus": "4.0", "memory": "8000M"},
|
||||||
})
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements:
|
def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements:
|
||||||
|
|
@ -54,8 +65,7 @@ def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequi
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
return client.V1ResourceRequirements(
|
return client.V1ResourceRequirements(
|
||||||
requests=to_dict(resources.reservations),
|
requests=to_dict(resources.reservations), limits=to_dict(resources.limits)
|
||||||
limits=to_dict(resources.limits)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -73,10 +83,12 @@ class ClusterInfo:
|
||||||
self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
|
self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
|
||||||
# Find the set of images in the pods
|
# Find the set of images in the pods
|
||||||
self.image_set = images_for_deployment(pod_files)
|
self.image_set = images_for_deployment(pod_files)
|
||||||
self.environment_variables = DeployEnvVars(env_var_map_from_file(compose_env_file))
|
self.environment_variables = DeployEnvVars(
|
||||||
|
env_var_map_from_file(compose_env_file)
|
||||||
|
)
|
||||||
self.app_name = deployment_name
|
self.app_name = deployment_name
|
||||||
self.spec = spec
|
self.spec = spec
|
||||||
if (opts.o.debug):
|
if opts.o.debug:
|
||||||
print(f"Env vars: {self.environment_variables.map}")
|
print(f"Env vars: {self.environment_variables.map}")
|
||||||
|
|
||||||
def get_nodeports(self):
|
def get_nodeports(self):
|
||||||
|
|
@ -90,7 +102,8 @@ class ClusterInfo:
|
||||||
for raw_port in [str(p) for p in service_info["ports"]]:
|
for raw_port in [str(p) for p in service_info["ports"]]:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"service port: {raw_port}")
|
print(f"service port: {raw_port}")
|
||||||
# Parse protocol suffix (e.g., "8001/udp" -> port=8001, protocol=UDP)
|
# Parse protocol suffix (e.g., "8001/udp" -> port=8001,
|
||||||
|
# protocol=UDP)
|
||||||
protocol = "TCP"
|
protocol = "TCP"
|
||||||
port_str = raw_port
|
port_str = raw_port
|
||||||
if "/" in raw_port:
|
if "/" in raw_port:
|
||||||
|
|
@ -106,22 +119,31 @@ class ClusterInfo:
|
||||||
node_port = None
|
node_port = None
|
||||||
pod_port = int(port_str)
|
pod_port = int(port_str)
|
||||||
service = client.V1Service(
|
service = client.V1Service(
|
||||||
metadata=client.V1ObjectMeta(name=f"{self.app_name}-nodeport-{pod_port}-{protocol.lower()}"),
|
metadata=client.V1ObjectMeta(
|
||||||
|
name=(
|
||||||
|
f"{self.app_name}-nodeport-"
|
||||||
|
f"{pod_port}-{protocol.lower()}"
|
||||||
|
)
|
||||||
|
),
|
||||||
spec=client.V1ServiceSpec(
|
spec=client.V1ServiceSpec(
|
||||||
type="NodePort",
|
type="NodePort",
|
||||||
ports=[client.V1ServicePort(
|
ports=[
|
||||||
|
client.V1ServicePort(
|
||||||
port=pod_port,
|
port=pod_port,
|
||||||
target_port=pod_port,
|
target_port=pod_port,
|
||||||
node_port=node_port,
|
node_port=node_port,
|
||||||
protocol=protocol
|
protocol=protocol,
|
||||||
)],
|
|
||||||
selector={"app": self.app_name}
|
|
||||||
)
|
)
|
||||||
|
],
|
||||||
|
selector={"app": self.app_name},
|
||||||
|
),
|
||||||
)
|
)
|
||||||
nodeports.append(service)
|
nodeports.append(service)
|
||||||
return nodeports
|
return nodeports
|
||||||
|
|
||||||
def get_ingress(self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"):
|
def get_ingress(
|
||||||
|
self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"
|
||||||
|
):
|
||||||
# No ingress for a deployment that has no http-proxy defined, for now
|
# No ingress for a deployment that has no http-proxy defined, for now
|
||||||
http_proxy_info_list = self.spec.get_http_proxy()
|
http_proxy_info_list = self.spec.get_http_proxy()
|
||||||
ingress = None
|
ingress = None
|
||||||
|
|
@ -133,10 +155,20 @@ class ClusterInfo:
|
||||||
# TODO: good enough parsing for webapp deployment for now
|
# TODO: good enough parsing for webapp deployment for now
|
||||||
host_name = http_proxy_info["host-name"]
|
host_name = http_proxy_info["host-name"]
|
||||||
rules = []
|
rules = []
|
||||||
tls = [client.V1IngressTLS(
|
tls = (
|
||||||
hosts=certificate["spec"]["dnsNames"] if certificate else [host_name],
|
[
|
||||||
secret_name=certificate["spec"]["secretName"] if certificate else f"{self.app_name}-tls"
|
client.V1IngressTLS(
|
||||||
)] if use_tls else None
|
hosts=certificate["spec"]["dnsNames"]
|
||||||
|
if certificate
|
||||||
|
else [host_name],
|
||||||
|
secret_name=certificate["spec"]["secretName"]
|
||||||
|
if certificate
|
||||||
|
else f"{self.app_name}-tls",
|
||||||
|
)
|
||||||
|
]
|
||||||
|
if use_tls
|
||||||
|
else None
|
||||||
|
)
|
||||||
paths = []
|
paths = []
|
||||||
for route in http_proxy_info["routes"]:
|
for route in http_proxy_info["routes"]:
|
||||||
path = route["path"]
|
path = route["path"]
|
||||||
|
|
@ -145,7 +177,8 @@ class ClusterInfo:
|
||||||
print(f"proxy config: {path} -> {proxy_to}")
|
print(f"proxy config: {path} -> {proxy_to}")
|
||||||
# proxy_to has the form <service>:<port>
|
# proxy_to has the form <service>:<port>
|
||||||
proxy_to_port = int(proxy_to.split(":")[1])
|
proxy_to_port = int(proxy_to.split(":")[1])
|
||||||
paths.append(client.V1HTTPIngressPath(
|
paths.append(
|
||||||
|
client.V1HTTPIngressPath(
|
||||||
path_type="Prefix",
|
path_type="Prefix",
|
||||||
path=path,
|
path=path,
|
||||||
backend=client.V1IngressBackend(
|
backend=client.V1IngressBackend(
|
||||||
|
|
@ -153,20 +186,17 @@ class ClusterInfo:
|
||||||
# TODO: this looks wrong
|
# TODO: this looks wrong
|
||||||
name=f"{self.app_name}-service",
|
name=f"{self.app_name}-service",
|
||||||
# TODO: pull port number from the service
|
# TODO: pull port number from the service
|
||||||
port=client.V1ServiceBackendPort(number=proxy_to_port)
|
port=client.V1ServiceBackendPort(number=proxy_to_port),
|
||||||
|
)
|
||||||
|
),
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
))
|
rules.append(
|
||||||
rules.append(client.V1IngressRule(
|
client.V1IngressRule(
|
||||||
host=host_name,
|
host=host_name, http=client.V1HTTPIngressRuleValue(paths=paths)
|
||||||
http=client.V1HTTPIngressRuleValue(
|
|
||||||
paths=paths
|
|
||||||
)
|
)
|
||||||
))
|
|
||||||
spec = client.V1IngressSpec(
|
|
||||||
tls=tls,
|
|
||||||
rules=rules
|
|
||||||
)
|
)
|
||||||
|
spec = client.V1IngressSpec(tls=tls, rules=rules)
|
||||||
|
|
||||||
ingress_annotations = {
|
ingress_annotations = {
|
||||||
"kubernetes.io/ingress.class": "nginx",
|
"kubernetes.io/ingress.class": "nginx",
|
||||||
|
|
@ -176,10 +206,9 @@ class ClusterInfo:
|
||||||
|
|
||||||
ingress = client.V1Ingress(
|
ingress = client.V1Ingress(
|
||||||
metadata=client.V1ObjectMeta(
|
metadata=client.V1ObjectMeta(
|
||||||
name=f"{self.app_name}-ingress",
|
name=f"{self.app_name}-ingress", annotations=ingress_annotations
|
||||||
annotations=ingress_annotations
|
|
||||||
),
|
),
|
||||||
spec=spec
|
spec=spec,
|
||||||
)
|
)
|
||||||
return ingress
|
return ingress
|
||||||
|
|
||||||
|
|
@ -198,12 +227,9 @@ class ClusterInfo:
|
||||||
metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"),
|
metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"),
|
||||||
spec=client.V1ServiceSpec(
|
spec=client.V1ServiceSpec(
|
||||||
type="ClusterIP",
|
type="ClusterIP",
|
||||||
ports=[client.V1ServicePort(
|
ports=[client.V1ServicePort(port=port, target_port=port)],
|
||||||
port=port,
|
selector={"app": self.app_name},
|
||||||
target_port=port
|
),
|
||||||
)],
|
|
||||||
selector={"app": self.app_name}
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
return service
|
return service
|
||||||
|
|
||||||
|
|
@ -226,7 +252,7 @@ class ClusterInfo:
|
||||||
|
|
||||||
labels = {
|
labels = {
|
||||||
"app": self.app_name,
|
"app": self.app_name,
|
||||||
"volume-label": f"{self.app_name}-{volume_name}"
|
"volume-label": f"{self.app_name}-{volume_name}",
|
||||||
}
|
}
|
||||||
if volume_path:
|
if volume_path:
|
||||||
storage_class_name = "manual"
|
storage_class_name = "manual"
|
||||||
|
|
@ -240,11 +266,13 @@ class ClusterInfo:
|
||||||
access_modes=["ReadWriteOnce"],
|
access_modes=["ReadWriteOnce"],
|
||||||
storage_class_name=storage_class_name,
|
storage_class_name=storage_class_name,
|
||||||
resources=to_k8s_resource_requirements(resources),
|
resources=to_k8s_resource_requirements(resources),
|
||||||
volume_name=k8s_volume_name
|
volume_name=k8s_volume_name,
|
||||||
)
|
)
|
||||||
pvc = client.V1PersistentVolumeClaim(
|
pvc = client.V1PersistentVolumeClaim(
|
||||||
metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}", labels=labels),
|
metadata=client.V1ObjectMeta(
|
||||||
spec=spec
|
name=f"{self.app_name}-{volume_name}", labels=labels
|
||||||
|
),
|
||||||
|
spec=spec,
|
||||||
)
|
)
|
||||||
result.append(pvc)
|
result.append(pvc)
|
||||||
return result
|
return result
|
||||||
|
|
@ -260,20 +288,27 @@ class ClusterInfo:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if not cfg_map_path.startswith("/"):
|
if not cfg_map_path.startswith("/"):
|
||||||
cfg_map_path = os.path.join(os.path.dirname(self.spec.file_path), cfg_map_path)
|
cfg_map_path = os.path.join(
|
||||||
|
os.path.dirname(self.spec.file_path), cfg_map_path
|
||||||
|
)
|
||||||
|
|
||||||
# Read in all the files at a single-level of the directory. This mimics the behavior
|
# Read in all the files at a single-level of the directory.
|
||||||
# of `kubectl create configmap foo --from-file=/path/to/dir`
|
# This mimics the behavior of
|
||||||
|
# `kubectl create configmap foo --from-file=/path/to/dir`
|
||||||
data = {}
|
data = {}
|
||||||
for f in os.listdir(cfg_map_path):
|
for f in os.listdir(cfg_map_path):
|
||||||
full_path = os.path.join(cfg_map_path, f)
|
full_path = os.path.join(cfg_map_path, f)
|
||||||
if os.path.isfile(full_path):
|
if os.path.isfile(full_path):
|
||||||
data[f] = base64.b64encode(open(full_path, 'rb').read()).decode('ASCII')
|
data[f] = base64.b64encode(open(full_path, "rb").read()).decode(
|
||||||
|
"ASCII"
|
||||||
|
)
|
||||||
|
|
||||||
spec = client.V1ConfigMap(
|
spec = client.V1ConfigMap(
|
||||||
metadata=client.V1ObjectMeta(name=f"{self.app_name}-{cfg_map_name}",
|
metadata=client.V1ObjectMeta(
|
||||||
labels={"configmap-label": cfg_map_name}),
|
name=f"{self.app_name}-{cfg_map_name}",
|
||||||
binary_data=data
|
labels={"configmap-label": cfg_map_name},
|
||||||
|
),
|
||||||
|
binary_data=data,
|
||||||
)
|
)
|
||||||
result.append(spec)
|
result.append(spec)
|
||||||
return result
|
return result
|
||||||
|
|
@ -287,10 +322,14 @@ class ClusterInfo:
|
||||||
resources = DEFAULT_VOLUME_RESOURCES
|
resources = DEFAULT_VOLUME_RESOURCES
|
||||||
for volume_name, volume_path in spec_volumes.items():
|
for volume_name, volume_path in spec_volumes.items():
|
||||||
# We only need to create a volume if it is fully qualified HostPath.
|
# We only need to create a volume if it is fully qualified HostPath.
|
||||||
# Otherwise, we create the PVC and expect the node to allocate the volume for us.
|
# Otherwise, we create the PVC and expect the node to allocate the volume
|
||||||
|
# for us.
|
||||||
if not volume_path:
|
if not volume_path:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"{volume_name} does not require an explicit PersistentVolume, since it is not a bind-mount.")
|
print(
|
||||||
|
f"{volume_name} does not require an explicit "
|
||||||
|
"PersistentVolume, since it is not a bind-mount."
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if volume_name not in named_volumes:
|
if volume_name not in named_volumes:
|
||||||
|
|
@ -299,22 +338,29 @@ class ClusterInfo:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if not os.path.isabs(volume_path):
|
if not os.path.isabs(volume_path):
|
||||||
print(f"WARNING: {volume_name}:{volume_path} is not absolute, cannot bind volume.")
|
print(
|
||||||
|
f"WARNING: {volume_name}:{volume_path} is not absolute, "
|
||||||
|
"cannot bind volume."
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if self.spec.is_kind_deployment():
|
if self.spec.is_kind_deployment():
|
||||||
host_path = client.V1HostPathVolumeSource(path=get_kind_pv_bind_mount_path(volume_name))
|
host_path = client.V1HostPathVolumeSource(
|
||||||
|
path=get_kind_pv_bind_mount_path(volume_name)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
host_path = client.V1HostPathVolumeSource(path=volume_path)
|
host_path = client.V1HostPathVolumeSource(path=volume_path)
|
||||||
spec = client.V1PersistentVolumeSpec(
|
spec = client.V1PersistentVolumeSpec(
|
||||||
storage_class_name="manual",
|
storage_class_name="manual",
|
||||||
access_modes=["ReadWriteOnce"],
|
access_modes=["ReadWriteOnce"],
|
||||||
capacity=to_k8s_resource_requirements(resources).requests,
|
capacity=to_k8s_resource_requirements(resources).requests,
|
||||||
host_path=host_path
|
host_path=host_path,
|
||||||
)
|
)
|
||||||
pv = client.V1PersistentVolume(
|
pv = client.V1PersistentVolume(
|
||||||
metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}",
|
metadata=client.V1ObjectMeta(
|
||||||
labels={"volume-label": f"{self.app_name}-{volume_name}"}),
|
name=f"{self.app_name}-{volume_name}",
|
||||||
|
labels={"volume-label": f"{self.app_name}-{volume_name}"},
|
||||||
|
),
|
||||||
spec=spec,
|
spec=spec,
|
||||||
)
|
)
|
||||||
result.append(pv)
|
result.append(pv)
|
||||||
|
|
@ -336,7 +382,8 @@ class ClusterInfo:
|
||||||
container_ports = []
|
container_ports = []
|
||||||
if "ports" in service_info:
|
if "ports" in service_info:
|
||||||
for raw_port in [str(p) for p in service_info["ports"]]:
|
for raw_port in [str(p) for p in service_info["ports"]]:
|
||||||
# Parse protocol suffix (e.g., "8001/udp" -> port=8001, protocol=UDP)
|
# Parse protocol suffix (e.g., "8001/udp" -> port=8001,
|
||||||
|
# protocol=UDP)
|
||||||
protocol = "TCP"
|
protocol = "TCP"
|
||||||
port_str = raw_port
|
port_str = raw_port
|
||||||
if "/" in raw_port:
|
if "/" in raw_port:
|
||||||
|
|
@ -346,31 +393,48 @@ class ClusterInfo:
|
||||||
if ":" in port_str:
|
if ":" in port_str:
|
||||||
port_str = port_str.split(":")[-1]
|
port_str = port_str.split(":")[-1]
|
||||||
port = int(port_str)
|
port = int(port_str)
|
||||||
container_ports.append(client.V1ContainerPort(container_port=port, protocol=protocol))
|
container_ports.append(
|
||||||
|
client.V1ContainerPort(
|
||||||
|
container_port=port, protocol=protocol
|
||||||
|
)
|
||||||
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"image: {image}")
|
print(f"image: {image}")
|
||||||
print(f"service ports: {container_ports}")
|
print(f"service ports: {container_ports}")
|
||||||
merged_envs = merge_envs(
|
merged_envs = (
|
||||||
|
merge_envs(
|
||||||
envs_from_compose_file(
|
envs_from_compose_file(
|
||||||
service_info["environment"], self.environment_variables.map), self.environment_variables.map
|
service_info["environment"], self.environment_variables.map
|
||||||
) if "environment" in service_info else self.environment_variables.map
|
),
|
||||||
|
self.environment_variables.map,
|
||||||
|
)
|
||||||
|
if "environment" in service_info
|
||||||
|
else self.environment_variables.map
|
||||||
|
)
|
||||||
envs = envs_from_environment_variables_map(merged_envs)
|
envs = envs_from_environment_variables_map(merged_envs)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Merged envs: {envs}")
|
print(f"Merged envs: {envs}")
|
||||||
# Re-write the image tag for remote deployment
|
# Re-write the image tag for remote deployment
|
||||||
# Note self.app_name has the same value as deployment_id
|
# Note self.app_name has the same value as deployment_id
|
||||||
image_to_use = remote_tag_for_image_unique(
|
image_to_use = (
|
||||||
image,
|
remote_tag_for_image_unique(
|
||||||
self.spec.get_image_registry(),
|
image, self.spec.get_image_registry(), self.app_name
|
||||||
self.app_name) if self.spec.get_image_registry() is not None else image
|
)
|
||||||
volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name)
|
if self.spec.get_image_registry() is not None
|
||||||
|
else image
|
||||||
|
)
|
||||||
|
volume_mounts = volume_mounts_for_service(
|
||||||
|
self.parsed_pod_yaml_map, service_name
|
||||||
|
)
|
||||||
# Handle command/entrypoint from compose file
|
# Handle command/entrypoint from compose file
|
||||||
# In docker-compose: entrypoint -> k8s command, command -> k8s args
|
# In docker-compose: entrypoint -> k8s command, command -> k8s args
|
||||||
container_command = None
|
container_command = None
|
||||||
container_args = None
|
container_args = None
|
||||||
if "entrypoint" in service_info:
|
if "entrypoint" in service_info:
|
||||||
entrypoint = service_info["entrypoint"]
|
entrypoint = service_info["entrypoint"]
|
||||||
container_command = entrypoint if isinstance(entrypoint, list) else [entrypoint]
|
container_command = (
|
||||||
|
entrypoint if isinstance(entrypoint, list) else [entrypoint]
|
||||||
|
)
|
||||||
if "command" in service_info:
|
if "command" in service_info:
|
||||||
cmd = service_info["command"]
|
cmd = service_info["command"]
|
||||||
container_args = cmd if isinstance(cmd, list) else cmd.split()
|
container_args = cmd if isinstance(cmd, list) else cmd.split()
|
||||||
|
|
@ -387,12 +451,16 @@ class ClusterInfo:
|
||||||
privileged=self.spec.get_privileged(),
|
privileged=self.spec.get_privileged(),
|
||||||
capabilities=client.V1Capabilities(
|
capabilities=client.V1Capabilities(
|
||||||
add=self.spec.get_capabilities()
|
add=self.spec.get_capabilities()
|
||||||
) if self.spec.get_capabilities() else None
|
)
|
||||||
|
if self.spec.get_capabilities()
|
||||||
|
else None,
|
||||||
),
|
),
|
||||||
resources=to_k8s_resource_requirements(resources),
|
resources=to_k8s_resource_requirements(resources),
|
||||||
)
|
)
|
||||||
containers.append(container)
|
containers.append(container)
|
||||||
volumes = volumes_for_pod_files(self.parsed_pod_yaml_map, self.spec, self.app_name)
|
volumes = volumes_for_pod_files(
|
||||||
|
self.parsed_pod_yaml_map, self.spec, self.app_name
|
||||||
|
)
|
||||||
image_pull_secrets = [client.V1LocalObjectReference(name="laconic-registry")]
|
image_pull_secrets = [client.V1LocalObjectReference(name="laconic-registry")]
|
||||||
|
|
||||||
annotations = None
|
annotations = None
|
||||||
|
|
@ -415,55 +483,54 @@ class ClusterInfo:
|
||||||
affinities = []
|
affinities = []
|
||||||
for rule in self.spec.get_node_affinities():
|
for rule in self.spec.get_node_affinities():
|
||||||
# TODO add some input validation here
|
# TODO add some input validation here
|
||||||
label_name = rule['label']
|
label_name = rule["label"]
|
||||||
label_value = rule['value']
|
label_value = rule["value"]
|
||||||
affinities.append(client.V1NodeSelectorTerm(
|
affinities.append(
|
||||||
match_expressions=[client.V1NodeSelectorRequirement(
|
client.V1NodeSelectorTerm(
|
||||||
key=label_name,
|
match_expressions=[
|
||||||
operator="In",
|
client.V1NodeSelectorRequirement(
|
||||||
values=[label_value]
|
key=label_name, operator="In", values=[label_value]
|
||||||
)]
|
)
|
||||||
|
]
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
affinity = client.V1Affinity(
|
affinity = client.V1Affinity(
|
||||||
node_affinity=client.V1NodeAffinity(
|
node_affinity=client.V1NodeAffinity(
|
||||||
required_during_scheduling_ignored_during_execution=client.V1NodeSelector(
|
required_during_scheduling_ignored_during_execution=(
|
||||||
node_selector_terms=affinities
|
client.V1NodeSelector(node_selector_terms=affinities)
|
||||||
))
|
)
|
||||||
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
if self.spec.get_node_tolerations():
|
if self.spec.get_node_tolerations():
|
||||||
tolerations = []
|
tolerations = []
|
||||||
for toleration in self.spec.get_node_tolerations():
|
for toleration in self.spec.get_node_tolerations():
|
||||||
# TODO add some input validation here
|
# TODO add some input validation here
|
||||||
toleration_key = toleration['key']
|
toleration_key = toleration["key"]
|
||||||
toleration_value = toleration['value']
|
toleration_value = toleration["value"]
|
||||||
tolerations.append(client.V1Toleration(
|
tolerations.append(
|
||||||
|
client.V1Toleration(
|
||||||
effect="NoSchedule",
|
effect="NoSchedule",
|
||||||
key=toleration_key,
|
key=toleration_key,
|
||||||
operator="Equal",
|
operator="Equal",
|
||||||
value=toleration_value
|
value=toleration_value,
|
||||||
))
|
)
|
||||||
|
)
|
||||||
|
|
||||||
template = client.V1PodTemplateSpec(
|
template = client.V1PodTemplateSpec(
|
||||||
metadata=client.V1ObjectMeta(
|
metadata=client.V1ObjectMeta(annotations=annotations, labels=labels),
|
||||||
annotations=annotations,
|
|
||||||
labels=labels
|
|
||||||
),
|
|
||||||
spec=client.V1PodSpec(
|
spec=client.V1PodSpec(
|
||||||
containers=containers,
|
containers=containers,
|
||||||
image_pull_secrets=image_pull_secrets,
|
image_pull_secrets=image_pull_secrets,
|
||||||
volumes=volumes,
|
volumes=volumes,
|
||||||
affinity=affinity,
|
affinity=affinity,
|
||||||
tolerations=tolerations
|
tolerations=tolerations,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
spec = client.V1DeploymentSpec(
|
spec = client.V1DeploymentSpec(
|
||||||
replicas=self.spec.get_replicas(),
|
replicas=self.spec.get_replicas(),
|
||||||
template=template, selector={
|
template=template,
|
||||||
"matchLabels":
|
selector={"matchLabels": {"app": self.app_name}},
|
||||||
{"app": self.app_name}
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
deployment = client.V1Deployment(
|
deployment = client.V1Deployment(
|
||||||
|
|
|
||||||
|
|
@ -23,12 +23,12 @@ from stack_orchestrator.util import (
|
||||||
get_pod_file_path,
|
get_pod_file_path,
|
||||||
get_job_list,
|
get_job_list,
|
||||||
get_job_file_path,
|
get_job_file_path,
|
||||||
error_exit
|
error_exit,
|
||||||
)
|
)
|
||||||
from stack_orchestrator.deploy.k8s.helm.kompose_wrapper import (
|
from stack_orchestrator.deploy.k8s.helm.kompose_wrapper import (
|
||||||
check_kompose_available,
|
check_kompose_available,
|
||||||
get_kompose_version,
|
get_kompose_version,
|
||||||
convert_to_helm_chart
|
convert_to_helm_chart,
|
||||||
)
|
)
|
||||||
from stack_orchestrator.util import get_yaml
|
from stack_orchestrator.util import get_yaml
|
||||||
|
|
||||||
|
|
@ -108,14 +108,17 @@ def _post_process_chart(chart_dir: Path, chart_name: str, jobs: list) -> None:
|
||||||
_wrap_job_templates_with_conditionals(chart_dir, jobs)
|
_wrap_job_templates_with_conditionals(chart_dir, jobs)
|
||||||
|
|
||||||
|
|
||||||
def generate_helm_chart(stack_path: str, spec_file: str, deployment_dir_path: Path) -> None:
|
def generate_helm_chart(
|
||||||
|
stack_path: str, spec_file: str, deployment_dir_path: Path
|
||||||
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Generate a self-sufficient Helm chart from stack compose files using Kompose.
|
Generate a self-sufficient Helm chart from stack compose files using Kompose.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
stack_path: Path to the stack directory
|
stack_path: Path to the stack directory
|
||||||
spec_file: Path to the deployment spec file
|
spec_file: Path to the deployment spec file
|
||||||
deployment_dir_path: Deployment directory path (already created with deployment.yml)
|
deployment_dir_path: Deployment directory path
|
||||||
|
(already created with deployment.yml)
|
||||||
|
|
||||||
Output structure:
|
Output structure:
|
||||||
deployment-dir/
|
deployment-dir/
|
||||||
|
|
@ -208,13 +211,14 @@ def generate_helm_chart(stack_path: str, spec_file: str, deployment_dir_path: Pa
|
||||||
# 5. Create chart directory and invoke Kompose
|
# 5. Create chart directory and invoke Kompose
|
||||||
chart_dir = deployment_dir_path / "chart"
|
chart_dir = deployment_dir_path / "chart"
|
||||||
|
|
||||||
print(f"Converting {len(compose_files)} compose file(s) to Helm chart using Kompose...")
|
print(
|
||||||
|
f"Converting {len(compose_files)} compose file(s) to Helm chart "
|
||||||
|
"using Kompose..."
|
||||||
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
output = convert_to_helm_chart(
|
output = convert_to_helm_chart(
|
||||||
compose_files=compose_files,
|
compose_files=compose_files, output_dir=chart_dir, chart_name=chart_name
|
||||||
output_dir=chart_dir,
|
|
||||||
chart_name=chart_name
|
|
||||||
)
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Kompose output:\n{output}")
|
print(f"Kompose output:\n{output}")
|
||||||
|
|
@ -291,7 +295,11 @@ Edit the generated template files in `templates/` to customize:
|
||||||
print(f" Stack: {stack_path}")
|
print(f" Stack: {stack_path}")
|
||||||
|
|
||||||
# Count generated files
|
# Count generated files
|
||||||
template_files = list((chart_dir / "templates").glob("*.yaml")) if (chart_dir / "templates").exists() else []
|
template_files = (
|
||||||
|
list((chart_dir / "templates").glob("*.yaml"))
|
||||||
|
if (chart_dir / "templates").exists()
|
||||||
|
else []
|
||||||
|
)
|
||||||
print(f" Files: {len(template_files)} template(s) generated")
|
print(f" Files: {len(template_files)} template(s) generated")
|
||||||
|
|
||||||
print("\nDeployment directory structure:")
|
print("\nDeployment directory structure:")
|
||||||
|
|
|
||||||
|
|
@ -53,7 +53,7 @@ def run_helm_job(
|
||||||
release: str = None,
|
release: str = None,
|
||||||
namespace: str = "default",
|
namespace: str = "default",
|
||||||
timeout: int = 600,
|
timeout: int = 600,
|
||||||
verbose: bool = False
|
verbose: bool = False,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Run a one-time job from a Helm chart.
|
Run a one-time job from a Helm chart.
|
||||||
|
|
@ -93,22 +93,31 @@ def run_helm_job(
|
||||||
print(f"Running job '{job_name}' from helm chart: {chart_dir}")
|
print(f"Running job '{job_name}' from helm chart: {chart_dir}")
|
||||||
|
|
||||||
# Use helm template to render the job manifest
|
# Use helm template to render the job manifest
|
||||||
with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as tmp_file:
|
with tempfile.NamedTemporaryFile(
|
||||||
|
mode="w", suffix=".yaml", delete=False
|
||||||
|
) as tmp_file:
|
||||||
try:
|
try:
|
||||||
# Render job template with job enabled
|
# Render job template with job enabled
|
||||||
# Use --set-json to properly handle job names with dashes
|
# Use --set-json to properly handle job names with dashes
|
||||||
jobs_dict = {job_name: {"enabled": True}}
|
jobs_dict = {job_name: {"enabled": True}}
|
||||||
values_json = json.dumps(jobs_dict)
|
values_json = json.dumps(jobs_dict)
|
||||||
helm_cmd = [
|
helm_cmd = [
|
||||||
"helm", "template", release, str(chart_dir),
|
"helm",
|
||||||
"--show-only", job_template_file,
|
"template",
|
||||||
"--set-json", f"jobs={values_json}"
|
release,
|
||||||
|
str(chart_dir),
|
||||||
|
"--show-only",
|
||||||
|
job_template_file,
|
||||||
|
"--set-json",
|
||||||
|
f"jobs={values_json}",
|
||||||
]
|
]
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Running: {' '.join(helm_cmd)}")
|
print(f"Running: {' '.join(helm_cmd)}")
|
||||||
|
|
||||||
result = subprocess.run(helm_cmd, check=True, capture_output=True, text=True)
|
result = subprocess.run(
|
||||||
|
helm_cmd, check=True, capture_output=True, text=True
|
||||||
|
)
|
||||||
tmp_file.write(result.stdout)
|
tmp_file.write(result.stdout)
|
||||||
tmp_file.flush()
|
tmp_file.flush()
|
||||||
|
|
||||||
|
|
@ -121,18 +130,30 @@ def run_helm_job(
|
||||||
actual_job_name = manifest.get("metadata", {}).get("name", job_name)
|
actual_job_name = manifest.get("metadata", {}).get("name", job_name)
|
||||||
|
|
||||||
# Apply the job manifest
|
# Apply the job manifest
|
||||||
kubectl_apply_cmd = ["kubectl", "apply", "-f", tmp_file.name, "-n", namespace]
|
kubectl_apply_cmd = [
|
||||||
subprocess.run(kubectl_apply_cmd, check=True, capture_output=True, text=True)
|
"kubectl",
|
||||||
|
"apply",
|
||||||
|
"-f",
|
||||||
|
tmp_file.name,
|
||||||
|
"-n",
|
||||||
|
namespace,
|
||||||
|
]
|
||||||
|
subprocess.run(
|
||||||
|
kubectl_apply_cmd, check=True, capture_output=True, text=True
|
||||||
|
)
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Job {actual_job_name} created, waiting for completion...")
|
print(f"Job {actual_job_name} created, waiting for completion...")
|
||||||
|
|
||||||
# Wait for job completion
|
# Wait for job completion
|
||||||
wait_cmd = [
|
wait_cmd = [
|
||||||
"kubectl", "wait", "--for=condition=complete",
|
"kubectl",
|
||||||
|
"wait",
|
||||||
|
"--for=condition=complete",
|
||||||
f"job/{actual_job_name}",
|
f"job/{actual_job_name}",
|
||||||
f"--timeout={timeout}s",
|
f"--timeout={timeout}s",
|
||||||
"-n", namespace
|
"-n",
|
||||||
|
namespace,
|
||||||
]
|
]
|
||||||
|
|
||||||
subprocess.run(wait_cmd, check=True, capture_output=True, text=True)
|
subprocess.run(wait_cmd, check=True, capture_output=True, text=True)
|
||||||
|
|
|
||||||
|
|
@ -38,10 +38,7 @@ def get_kompose_version() -> str:
|
||||||
raise Exception("kompose not found in PATH")
|
raise Exception("kompose not found in PATH")
|
||||||
|
|
||||||
result = subprocess.run(
|
result = subprocess.run(
|
||||||
["kompose", "version"],
|
["kompose", "version"], capture_output=True, text=True, timeout=10
|
||||||
capture_output=True,
|
|
||||||
text=True,
|
|
||||||
timeout=10
|
|
||||||
)
|
)
|
||||||
|
|
||||||
if result.returncode != 0:
|
if result.returncode != 0:
|
||||||
|
|
@ -55,7 +52,9 @@ def get_kompose_version() -> str:
|
||||||
return version
|
return version
|
||||||
|
|
||||||
|
|
||||||
def convert_to_helm_chart(compose_files: List[Path], output_dir: Path, chart_name: str = None) -> str:
|
def convert_to_helm_chart(
|
||||||
|
compose_files: List[Path], output_dir: Path, chart_name: str = None
|
||||||
|
) -> str:
|
||||||
"""
|
"""
|
||||||
Invoke kompose to convert Docker Compose files to a Helm chart.
|
Invoke kompose to convert Docker Compose files to a Helm chart.
|
||||||
|
|
||||||
|
|
@ -92,12 +91,7 @@ def convert_to_helm_chart(compose_files: List[Path], output_dir: Path, chart_nam
|
||||||
cmd.extend(["--chart", "-o", str(output_dir)])
|
cmd.extend(["--chart", "-o", str(output_dir)])
|
||||||
|
|
||||||
# Execute kompose
|
# Execute kompose
|
||||||
result = subprocess.run(
|
result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)
|
||||||
cmd,
|
|
||||||
capture_output=True,
|
|
||||||
text=True,
|
|
||||||
timeout=60
|
|
||||||
)
|
|
||||||
|
|
||||||
if result.returncode != 0:
|
if result.returncode != 0:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
|
|
|
||||||
|
|
@ -21,21 +21,21 @@ from stack_orchestrator.deploy.k8s.helpers import get_kind_cluster
|
||||||
@click.group()
|
@click.group()
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx):
|
def command(ctx):
|
||||||
'''k8s cluster management commands'''
|
"""k8s cluster management commands"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
@command.group()
|
@command.group()
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def list(ctx):
|
def list(ctx):
|
||||||
'''list k8s resources'''
|
"""list k8s resources"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
@list.command()
|
@list.command()
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def cluster(ctx):
|
def cluster(ctx):
|
||||||
'''Show the existing kind cluster'''
|
"""Show the existing kind cluster"""
|
||||||
existing_cluster = get_kind_cluster()
|
existing_cluster = get_kind_cluster()
|
||||||
if existing_cluster:
|
if existing_cluster:
|
||||||
print(existing_cluster)
|
print(existing_cluster)
|
||||||
|
|
|
||||||
|
|
@ -19,7 +19,6 @@ from stack_orchestrator.util import get_yaml
|
||||||
|
|
||||||
|
|
||||||
class Stack:
|
class Stack:
|
||||||
|
|
||||||
name: str
|
name: str
|
||||||
obj: typing.Any
|
obj: typing.Any
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -27,7 +27,9 @@ from stack_orchestrator.deploy.deploy_types import DeployCommandContext
|
||||||
|
|
||||||
def _fixup_container_tag(deployment_dir: str, image: str):
|
def _fixup_container_tag(deployment_dir: str, image: str):
|
||||||
deployment_dir_path = Path(deployment_dir)
|
deployment_dir_path = Path(deployment_dir)
|
||||||
compose_file = deployment_dir_path.joinpath("compose", "docker-compose-webapp-template.yml")
|
compose_file = deployment_dir_path.joinpath(
|
||||||
|
"compose", "docker-compose-webapp-template.yml"
|
||||||
|
)
|
||||||
# replace "cerc/webapp-container:local" in the file with our image tag
|
# replace "cerc/webapp-container:local" in the file with our image tag
|
||||||
with open(compose_file) as rfile:
|
with open(compose_file) as rfile:
|
||||||
contents = rfile.read()
|
contents = rfile.read()
|
||||||
|
|
@ -39,13 +41,13 @@ def _fixup_container_tag(deployment_dir: str, image: str):
|
||||||
def _fixup_url_spec(spec_file_name: str, url: str):
|
def _fixup_url_spec(spec_file_name: str, url: str):
|
||||||
# url is like: https://example.com/path
|
# url is like: https://example.com/path
|
||||||
parsed_url = urlparse(url)
|
parsed_url = urlparse(url)
|
||||||
http_proxy_spec = f'''
|
http_proxy_spec = f"""
|
||||||
http-proxy:
|
http-proxy:
|
||||||
- host-name: {parsed_url.hostname}
|
- host-name: {parsed_url.hostname}
|
||||||
routes:
|
routes:
|
||||||
- path: '{parsed_url.path if parsed_url.path else "/"}'
|
- path: '{parsed_url.path if parsed_url.path else "/"}'
|
||||||
proxy-to: webapp:80
|
proxy-to: webapp:80
|
||||||
'''
|
"""
|
||||||
spec_file_path = Path(spec_file_name)
|
spec_file_path = Path(spec_file_name)
|
||||||
with open(spec_file_path) as rfile:
|
with open(spec_file_path) as rfile:
|
||||||
contents = rfile.read()
|
contents = rfile.read()
|
||||||
|
|
@ -54,11 +56,15 @@ def _fixup_url_spec(spec_file_name: str, url: str):
|
||||||
wfile.write(contents)
|
wfile.write(contents)
|
||||||
|
|
||||||
|
|
||||||
def create_deployment(ctx, deployment_dir, image, url, kube_config, image_registry, env_file):
|
def create_deployment(
|
||||||
|
ctx, deployment_dir, image, url, kube_config, image_registry, env_file
|
||||||
|
):
|
||||||
# Do the equivalent of:
|
# Do the equivalent of:
|
||||||
# 1. laconic-so --stack webapp-template deploy --deploy-to k8s init --output webapp-spec.yml
|
# 1. laconic-so --stack webapp-template deploy --deploy-to k8s init \
|
||||||
|
# --output webapp-spec.yml
|
||||||
# --config (eqivalent of the contents of my-config.env)
|
# --config (eqivalent of the contents of my-config.env)
|
||||||
# 2. laconic-so --stack webapp-template deploy --deploy-to k8s create --deployment-dir test-deployment
|
# 2. laconic-so --stack webapp-template deploy --deploy-to k8s create \
|
||||||
|
# --deployment-dir test-deployment
|
||||||
# --spec-file webapp-spec.yml
|
# --spec-file webapp-spec.yml
|
||||||
# 3. Replace the container image tag with the specified image
|
# 3. Replace the container image tag with the specified image
|
||||||
deployment_dir_path = Path(deployment_dir)
|
deployment_dir_path = Path(deployment_dir)
|
||||||
|
|
@ -83,17 +89,12 @@ def create_deployment(ctx, deployment_dir, image, url, kube_config, image_regist
|
||||||
kube_config,
|
kube_config,
|
||||||
image_registry,
|
image_registry,
|
||||||
spec_file_name,
|
spec_file_name,
|
||||||
None
|
None,
|
||||||
)
|
)
|
||||||
# Add the TLS and DNS spec
|
# Add the TLS and DNS spec
|
||||||
_fixup_url_spec(spec_file_name, url)
|
_fixup_url_spec(spec_file_name, url)
|
||||||
create_operation(
|
create_operation(
|
||||||
deploy_command_context,
|
deploy_command_context, spec_file_name, deployment_dir, False, None, None
|
||||||
spec_file_name,
|
|
||||||
deployment_dir,
|
|
||||||
False,
|
|
||||||
None,
|
|
||||||
None
|
|
||||||
)
|
)
|
||||||
# Fix up the container tag inside the deployment compose file
|
# Fix up the container tag inside the deployment compose file
|
||||||
_fixup_container_tag(deployment_dir, image)
|
_fixup_container_tag(deployment_dir, image)
|
||||||
|
|
@ -103,7 +104,7 @@ def create_deployment(ctx, deployment_dir, image, url, kube_config, image_regist
|
||||||
@click.group()
|
@click.group()
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx):
|
def command(ctx):
|
||||||
'''manage a webapp deployment'''
|
"""manage a webapp deployment"""
|
||||||
|
|
||||||
# Check that --stack wasn't supplied
|
# Check that --stack wasn't supplied
|
||||||
if ctx.parent.obj.stack:
|
if ctx.parent.obj.stack:
|
||||||
|
|
@ -112,13 +113,20 @@ def command(ctx):
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--kube-config", help="Provide a config file for a k8s deployment")
|
@click.option("--kube-config", help="Provide a config file for a k8s deployment")
|
||||||
@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster")
|
@click.option(
|
||||||
@click.option("--deployment-dir", help="Create deployment files in this directory", required=True)
|
"--image-registry",
|
||||||
|
help="Provide a container image registry url for this k8s cluster",
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--deployment-dir", help="Create deployment files in this directory", required=True
|
||||||
|
)
|
||||||
@click.option("--image", help="image to deploy", required=True)
|
@click.option("--image", help="image to deploy", required=True)
|
||||||
@click.option("--url", help="url to serve", required=True)
|
@click.option("--url", help="url to serve", required=True)
|
||||||
@click.option("--env-file", help="environment file for webapp")
|
@click.option("--env-file", help="environment file for webapp")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def create(ctx, deployment_dir, image, url, kube_config, image_registry, env_file):
|
def create(ctx, deployment_dir, image, url, kube_config, image_registry, env_file):
|
||||||
'''create a deployment for the specified webapp container'''
|
"""create a deployment for the specified webapp container"""
|
||||||
|
|
||||||
return create_deployment(ctx, deployment_dir, image, url, kube_config, image_registry, env_file)
|
return create_deployment(
|
||||||
|
ctx, deployment_dir, image, url, kube_config, image_registry, env_file
|
||||||
|
)
|
||||||
|
|
|
||||||
|
|
@ -112,7 +112,8 @@ def process_app_deployment_request(
|
||||||
)
|
)
|
||||||
elif "preexisting" == fqdn_policy:
|
elif "preexisting" == fqdn_policy:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
f"No pre-existing DnsRecord {dns_lrn} could be found for request {app_deployment_request.id}."
|
f"No pre-existing DnsRecord {dns_lrn} could be found for "
|
||||||
|
f"request {app_deployment_request.id}."
|
||||||
)
|
)
|
||||||
|
|
||||||
# 4. get build and runtime config from request
|
# 4. get build and runtime config from request
|
||||||
|
|
@ -128,7 +129,8 @@ def process_app_deployment_request(
|
||||||
parsed = AttrDict(yaml.safe_load(decrypted.data))
|
parsed = AttrDict(yaml.safe_load(decrypted.data))
|
||||||
if record_owner not in parsed.authorized:
|
if record_owner not in parsed.authorized:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
f"{record_owner} not authorized to access config {app_deployment_request.attributes.config.ref}"
|
f"{record_owner} not authorized to access config "
|
||||||
|
f"{app_deployment_request.attributes.config.ref}"
|
||||||
)
|
)
|
||||||
if "env" in parsed.config:
|
if "env" in parsed.config:
|
||||||
env.update(parsed.config.env)
|
env.update(parsed.config.env)
|
||||||
|
|
@ -156,8 +158,10 @@ def process_app_deployment_request(
|
||||||
|
|
||||||
deployment_record = laconic.get_record(app_deployment_lrn)
|
deployment_record = laconic.get_record(app_deployment_lrn)
|
||||||
deployment_dir = os.path.join(deployment_parent_dir, fqdn)
|
deployment_dir = os.path.join(deployment_parent_dir, fqdn)
|
||||||
# At present we use this to generate a unique but stable ID for the app's host container
|
# At present we use this to generate a unique but stable ID for the
|
||||||
# TODO: implement support to derive this transparently from the already-unique deployment id
|
# app's host container
|
||||||
|
# TODO: implement support to derive this transparently from the
|
||||||
|
# already-unique deployment id
|
||||||
unique_deployment_id = hashlib.md5(fqdn.encode()).hexdigest()[:16]
|
unique_deployment_id = hashlib.md5(fqdn.encode()).hexdigest()[:16]
|
||||||
deployment_config_file = os.path.join(deployment_dir, "config.env")
|
deployment_config_file = os.path.join(deployment_dir, "config.env")
|
||||||
deployment_container_tag = "laconic-webapp/%s:local" % unique_deployment_id
|
deployment_container_tag = "laconic-webapp/%s:local" % unique_deployment_id
|
||||||
|
|
@ -166,11 +170,12 @@ def process_app_deployment_request(
|
||||||
if not os.path.exists(deployment_dir):
|
if not os.path.exists(deployment_dir):
|
||||||
if deployment_record:
|
if deployment_record:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
"Deployment record %s exists, but not deployment dir %s. Please remove name."
|
"Deployment record %s exists, but not deployment dir %s. "
|
||||||
% (app_deployment_lrn, deployment_dir)
|
"Please remove name." % (app_deployment_lrn, deployment_dir)
|
||||||
)
|
)
|
||||||
logger.log(
|
logger.log(
|
||||||
f"Creating webapp deployment in: {deployment_dir} with container id: {deployment_container_tag}"
|
f"Creating webapp deployment in: {deployment_dir} "
|
||||||
|
f"with container id: {deployment_container_tag}"
|
||||||
)
|
)
|
||||||
deploy_webapp.create_deployment(
|
deploy_webapp.create_deployment(
|
||||||
ctx,
|
ctx,
|
||||||
|
|
@ -187,7 +192,8 @@ def process_app_deployment_request(
|
||||||
needs_k8s_deploy = False
|
needs_k8s_deploy = False
|
||||||
if force_rebuild:
|
if force_rebuild:
|
||||||
logger.log(
|
logger.log(
|
||||||
"--force-rebuild is enabled so the container will always be built now, even if nothing has changed in the app"
|
"--force-rebuild is enabled so the container will always be "
|
||||||
|
"built now, even if nothing has changed in the app"
|
||||||
)
|
)
|
||||||
# 6. build container (if needed)
|
# 6. build container (if needed)
|
||||||
# TODO: add a comment that explains what this code is doing (not clear to me)
|
# TODO: add a comment that explains what this code is doing (not clear to me)
|
||||||
|
|
@ -199,11 +205,12 @@ def process_app_deployment_request(
|
||||||
needs_k8s_deploy = True
|
needs_k8s_deploy = True
|
||||||
# check if the image already exists
|
# check if the image already exists
|
||||||
shared_tag_exists = remote_image_exists(image_registry, app_image_shared_tag)
|
shared_tag_exists = remote_image_exists(image_registry, app_image_shared_tag)
|
||||||
# Note: in the code below, calls to add_tags_to_image() won't work at present.
|
# Note: in the code below, calls to add_tags_to_image() won't
|
||||||
# This is because SO deployment code in general re-names the container image
|
# work at present. This is because SO deployment code in general
|
||||||
# to be unique to the deployment. This is done transparently
|
# re-names the container image to be unique to the deployment.
|
||||||
# and so when we call add_tags_to_image() here and try to add tags to the remote image,
|
# This is done transparently and so when we call add_tags_to_image()
|
||||||
# we get the image name wrong. Accordingly I've disabled the relevant code for now.
|
# here and try to add tags to the remote image, we get the image
|
||||||
|
# name wrong. Accordingly I've disabled the relevant code for now.
|
||||||
# This is safe because we are running with --force-rebuild at present
|
# This is safe because we are running with --force-rebuild at present
|
||||||
if shared_tag_exists and not force_rebuild:
|
if shared_tag_exists and not force_rebuild:
|
||||||
# simply add our unique tag to the existing image and we are done
|
# simply add our unique tag to the existing image and we are done
|
||||||
|
|
@ -211,7 +218,9 @@ def process_app_deployment_request(
|
||||||
f"(SKIPPED) Existing image found for this app: {app_image_shared_tag} "
|
f"(SKIPPED) Existing image found for this app: {app_image_shared_tag} "
|
||||||
"tagging it with: {deployment_container_tag} to use in this deployment"
|
"tagging it with: {deployment_container_tag} to use in this deployment"
|
||||||
)
|
)
|
||||||
# add_tags_to_image(image_registry, app_image_shared_tag, deployment_container_tag)
|
# add_tags_to_image(
|
||||||
|
# image_registry, app_image_shared_tag, deployment_container_tag
|
||||||
|
# )
|
||||||
logger.log("Tag complete")
|
logger.log("Tag complete")
|
||||||
else:
|
else:
|
||||||
extra_build_args = [] # TODO: pull from request
|
extra_build_args = [] # TODO: pull from request
|
||||||
|
|
@ -223,11 +232,15 @@ def process_app_deployment_request(
|
||||||
logger.log(f"Pushing container image: {deployment_container_tag}")
|
logger.log(f"Pushing container image: {deployment_container_tag}")
|
||||||
push_container_image(deployment_dir, logger)
|
push_container_image(deployment_dir, logger)
|
||||||
logger.log("Push complete")
|
logger.log("Push complete")
|
||||||
# The build/push commands above will use the unique deployment tag, so now we need to add the shared tag.
|
# The build/push commands above will use the unique deployment
|
||||||
|
# tag, so now we need to add the shared tag.
|
||||||
logger.log(
|
logger.log(
|
||||||
f"(SKIPPED) Adding global app image tag: {app_image_shared_tag} to newly built image: {deployment_container_tag}"
|
f"(SKIPPED) Adding global app image tag: {app_image_shared_tag} "
|
||||||
|
f"to newly built image: {deployment_container_tag}"
|
||||||
)
|
)
|
||||||
# add_tags_to_image(image_registry, deployment_container_tag, app_image_shared_tag)
|
# add_tags_to_image(
|
||||||
|
# image_registry, deployment_container_tag, app_image_shared_tag
|
||||||
|
# )
|
||||||
logger.log("Tag complete")
|
logger.log("Tag complete")
|
||||||
else:
|
else:
|
||||||
logger.log("Requested app is already deployed, skipping build and image push")
|
logger.log("Requested app is already deployed, skipping build and image push")
|
||||||
|
|
@ -306,7 +319,11 @@ def dump_known_requests(filename, requests, status="SEEN"):
|
||||||
help="How to handle requests with an FQDN: prohibit, allow, preexisting",
|
help="How to handle requests with an FQDN: prohibit, allow, preexisting",
|
||||||
default="prohibit",
|
default="prohibit",
|
||||||
)
|
)
|
||||||
@click.option("--ip", help="IP address of the k8s deployment (to be set in DNS record)", default=None)
|
@click.option(
|
||||||
|
"--ip",
|
||||||
|
help="IP address of the k8s deployment (to be set in DNS record)",
|
||||||
|
default=None,
|
||||||
|
)
|
||||||
@click.option("--record-namespace-dns", help="eg, lrn://laconic/dns", required=True)
|
@click.option("--record-namespace-dns", help="eg, lrn://laconic/dns", required=True)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--record-namespace-deployments",
|
"--record-namespace-deployments",
|
||||||
|
|
@ -364,7 +381,9 @@ def dump_known_requests(filename, requests, status="SEEN"):
|
||||||
"--private-key-file", help="The private key for decrypting config.", required=True
|
"--private-key-file", help="The private key for decrypting config.", required=True
|
||||||
)
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--registry-lock-file", help="File path to use for registry mutex lock", default=None
|
"--registry-lock-file",
|
||||||
|
help="File path to use for registry mutex lock",
|
||||||
|
default=None,
|
||||||
)
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--private-key-passphrase",
|
"--private-key-passphrase",
|
||||||
|
|
@ -421,7 +440,8 @@ def command( # noqa: C901
|
||||||
or not dns_suffix
|
or not dns_suffix
|
||||||
):
|
):
|
||||||
print(
|
print(
|
||||||
"--dns-suffix, --record-namespace-dns, and --record-namespace-deployments are all required",
|
"--dns-suffix, --record-namespace-dns, and "
|
||||||
|
"--record-namespace-deployments are all required",
|
||||||
file=sys.stderr,
|
file=sys.stderr,
|
||||||
)
|
)
|
||||||
sys.exit(2)
|
sys.exit(2)
|
||||||
|
|
@ -459,14 +479,17 @@ def command( # noqa: C901
|
||||||
include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
|
include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
|
||||||
exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
|
exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
|
||||||
|
|
||||||
laconic = LaconicRegistryClient(laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file)
|
laconic = LaconicRegistryClient(
|
||||||
|
laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file
|
||||||
|
)
|
||||||
webapp_deployer_record = laconic.get_record(lrn, require=True)
|
webapp_deployer_record = laconic.get_record(lrn, require=True)
|
||||||
payment_address = webapp_deployer_record.attributes.paymentAddress
|
payment_address = webapp_deployer_record.attributes.paymentAddress
|
||||||
main_logger.log(f"Payment address: {payment_address}")
|
main_logger.log(f"Payment address: {payment_address}")
|
||||||
|
|
||||||
if min_required_payment and not payment_address:
|
if min_required_payment and not payment_address:
|
||||||
print(
|
print(
|
||||||
f"Minimum payment required, but no payment address listed for deployer: {lrn}.",
|
f"Minimum payment required, but no payment address listed "
|
||||||
|
f"for deployer: {lrn}.",
|
||||||
file=sys.stderr,
|
file=sys.stderr,
|
||||||
)
|
)
|
||||||
sys.exit(2)
|
sys.exit(2)
|
||||||
|
|
@ -536,7 +559,8 @@ def command( # noqa: C901
|
||||||
|
|
||||||
if skip_by_tag(r, include_tags, exclude_tags):
|
if skip_by_tag(r, include_tags, exclude_tags):
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
"Skipping request %s, filtered by tag (include %s, exclude %s, present %s)"
|
"Skipping request %s, filtered by tag "
|
||||||
|
"(include %s, exclude %s, present %s)"
|
||||||
% (r.id, include_tags, exclude_tags, r.attributes.tags)
|
% (r.id, include_tags, exclude_tags, r.attributes.tags)
|
||||||
)
|
)
|
||||||
skipped_by_name[requested_name] = r
|
skipped_by_name[requested_name] = r
|
||||||
|
|
@ -581,11 +605,13 @@ def command( # noqa: C901
|
||||||
cancellation_requests[r.id], r
|
cancellation_requests[r.id], r
|
||||||
):
|
):
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Found deployment cancellation request for {r.id} at {cancellation_requests[r.id].id}"
|
f"Found deployment cancellation request for {r.id} "
|
||||||
|
f"at {cancellation_requests[r.id].id}"
|
||||||
)
|
)
|
||||||
elif r.id in deployments_by_request:
|
elif r.id in deployments_by_request:
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Found satisfied request for {r.id} at {deployments_by_request[r.id].id}"
|
f"Found satisfied request for {r.id} "
|
||||||
|
f"at {deployments_by_request[r.id].id}"
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
if (
|
if (
|
||||||
|
|
@ -593,7 +619,8 @@ def command( # noqa: C901
|
||||||
and previous_requests[r.id].get("status", "") != "RETRY"
|
and previous_requests[r.id].get("status", "") != "RETRY"
|
||||||
):
|
):
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Skipping unsatisfied request {r.id} because we have seen it before."
|
f"Skipping unsatisfied request {r.id} "
|
||||||
|
"because we have seen it before."
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
main_logger.log(f"Request {r.id} needs to processed.")
|
main_logger.log(f"Request {r.id} needs to processed.")
|
||||||
|
|
@ -603,13 +630,7 @@ def command( # noqa: C901
|
||||||
for r in requests_to_check_for_payment:
|
for r in requests_to_check_for_payment:
|
||||||
if r.attributes.auction:
|
if r.attributes.auction:
|
||||||
if auction_requests:
|
if auction_requests:
|
||||||
if confirm_auction(
|
if confirm_auction(laconic, r, lrn, payment_address, main_logger):
|
||||||
laconic,
|
|
||||||
r,
|
|
||||||
lrn,
|
|
||||||
payment_address,
|
|
||||||
main_logger
|
|
||||||
):
|
|
||||||
main_logger.log(f"{r.id}: Auction confirmed.")
|
main_logger.log(f"{r.id}: Auction confirmed.")
|
||||||
requests_to_execute.append(r)
|
requests_to_execute.append(r)
|
||||||
else:
|
else:
|
||||||
|
|
@ -653,7 +674,10 @@ def command( # noqa: C901
|
||||||
run_log_file = None
|
run_log_file = None
|
||||||
run_reg_client = laconic
|
run_reg_client = laconic
|
||||||
try:
|
try:
|
||||||
run_id = f"{r.id}-{str(time.time()).split('.')[0]}-{str(uuid.uuid4()).split('-')[0]}"
|
run_id = (
|
||||||
|
f"{r.id}-{str(time.time()).split('.')[0]}-"
|
||||||
|
f"{str(uuid.uuid4()).split('-')[0]}"
|
||||||
|
)
|
||||||
if log_dir:
|
if log_dir:
|
||||||
run_log_dir = os.path.join(log_dir, r.id)
|
run_log_dir = os.path.join(log_dir, r.id)
|
||||||
if not os.path.exists(run_log_dir):
|
if not os.path.exists(run_log_dir):
|
||||||
|
|
@ -664,7 +688,9 @@ def command( # noqa: C901
|
||||||
)
|
)
|
||||||
run_log_file = open(run_log_file_path, "wt")
|
run_log_file = open(run_log_file_path, "wt")
|
||||||
run_reg_client = LaconicRegistryClient(
|
run_reg_client = LaconicRegistryClient(
|
||||||
laconic_config, log_file=run_log_file, mutex_lock_file=registry_lock_file
|
laconic_config,
|
||||||
|
log_file=run_log_file,
|
||||||
|
mutex_lock_file=registry_lock_file,
|
||||||
)
|
)
|
||||||
|
|
||||||
build_logger = TimedLogger(run_id, run_log_file)
|
build_logger = TimedLogger(run_id, run_log_file)
|
||||||
|
|
|
||||||
|
|
@ -44,19 +44,27 @@ def process_app_deployment_auction(
|
||||||
|
|
||||||
# Check auction kind
|
# Check auction kind
|
||||||
if auction.kind != AUCTION_KIND_PROVIDER:
|
if auction.kind != AUCTION_KIND_PROVIDER:
|
||||||
raise Exception(f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction.kind}")
|
raise Exception(
|
||||||
|
f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction.kind}"
|
||||||
|
)
|
||||||
|
|
||||||
if current_status == "PENDING":
|
if current_status == "PENDING":
|
||||||
# Skip if pending auction not in commit state
|
# Skip if pending auction not in commit state
|
||||||
if auction.status != AuctionStatus.COMMIT:
|
if auction.status != AuctionStatus.COMMIT:
|
||||||
logger.log(f"Skipping pending request, auction {auction_id} status: {auction.status}")
|
logger.log(
|
||||||
|
f"Skipping pending request, auction {auction_id} "
|
||||||
|
f"status: {auction.status}"
|
||||||
|
)
|
||||||
return "SKIP", ""
|
return "SKIP", ""
|
||||||
|
|
||||||
# Check max_price
|
# Check max_price
|
||||||
bid_amount_int = int(bid_amount)
|
bid_amount_int = int(bid_amount)
|
||||||
max_price_int = int(auction.maxPrice.quantity)
|
max_price_int = int(auction.maxPrice.quantity)
|
||||||
if max_price_int < bid_amount_int:
|
if max_price_int < bid_amount_int:
|
||||||
logger.log(f"Skipping auction {auction_id} with max_price ({max_price_int}) less than bid_amount ({bid_amount_int})")
|
logger.log(
|
||||||
|
f"Skipping auction {auction_id} with max_price ({max_price_int}) "
|
||||||
|
f"less than bid_amount ({bid_amount_int})"
|
||||||
|
)
|
||||||
return "SKIP", ""
|
return "SKIP", ""
|
||||||
|
|
||||||
# Bid on the auction
|
# Bid on the auction
|
||||||
|
|
@ -121,7 +129,9 @@ def dump_known_auction_requests(filename, requests, status="SEEN"):
|
||||||
required=True,
|
required=True,
|
||||||
)
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--registry-lock-file", help="File path to use for registry mutex lock", default=None
|
"--registry-lock-file",
|
||||||
|
help="File path to use for registry mutex lock",
|
||||||
|
default=None,
|
||||||
)
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--dry-run", help="Don't do anything, just report what would be done.", is_flag=True
|
"--dry-run", help="Don't do anything, just report what would be done.", is_flag=True
|
||||||
|
|
@ -142,7 +152,9 @@ def command(
|
||||||
logger = TimedLogger(file=sys.stderr)
|
logger = TimedLogger(file=sys.stderr)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
laconic = LaconicRegistryClient(laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file)
|
laconic = LaconicRegistryClient(
|
||||||
|
laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file
|
||||||
|
)
|
||||||
auctions_requests = laconic.app_deployment_auctions()
|
auctions_requests = laconic.app_deployment_auctions()
|
||||||
|
|
||||||
previous_requests = {}
|
previous_requests = {}
|
||||||
|
|
@ -164,7 +176,8 @@ def command(
|
||||||
|
|
||||||
# Handle already seen requests
|
# Handle already seen requests
|
||||||
if r.id in previous_requests:
|
if r.id in previous_requests:
|
||||||
# If it's not in commit or reveal status, skip the request as we've already seen it
|
# If it's not in commit or reveal status, skip the request as we've
|
||||||
|
# already seen it
|
||||||
current_status = previous_requests[r.id].get("status", "")
|
current_status = previous_requests[r.id].get("status", "")
|
||||||
result_status = current_status
|
result_status = current_status
|
||||||
if current_status not in ["COMMIT", "REVEAL"]:
|
if current_status not in ["COMMIT", "REVEAL"]:
|
||||||
|
|
@ -172,7 +185,10 @@ def command(
|
||||||
continue
|
continue
|
||||||
|
|
||||||
reveal_file_path = previous_requests[r.id].get("revealFile", "")
|
reveal_file_path = previous_requests[r.id].get("revealFile", "")
|
||||||
logger.log(f"Found existing auction request {r.id} for application {application}, status {current_status}.")
|
logger.log(
|
||||||
|
f"Found existing auction request {r.id} for application "
|
||||||
|
f"{application}, status {current_status}."
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
# It's a fresh request, check application record
|
# It's a fresh request, check application record
|
||||||
app = laconic.get_record(application)
|
app = laconic.get_record(application)
|
||||||
|
|
@ -181,7 +197,10 @@ def command(
|
||||||
result_status = "ERROR"
|
result_status = "ERROR"
|
||||||
continue
|
continue
|
||||||
|
|
||||||
logger.log(f"Found pending auction request {r.id} for application {application}.")
|
logger.log(
|
||||||
|
f"Found pending auction request {r.id} for application "
|
||||||
|
f"{application}."
|
||||||
|
)
|
||||||
|
|
||||||
# Add requests to be processed
|
# Add requests to be processed
|
||||||
requests_to_execute.append((r, result_status, reveal_file_path))
|
requests_to_execute.append((r, result_status, reveal_file_path))
|
||||||
|
|
@ -190,9 +209,15 @@ def command(
|
||||||
result_status = "ERROR"
|
result_status = "ERROR"
|
||||||
logger.log(f"ERROR: examining request {r.id}: " + str(e))
|
logger.log(f"ERROR: examining request {r.id}: " + str(e))
|
||||||
finally:
|
finally:
|
||||||
logger.log(f"DONE: Examining request {r.id} with result {result_status}.")
|
logger.log(
|
||||||
|
f"DONE: Examining request {r.id} with result {result_status}."
|
||||||
|
)
|
||||||
if result_status in ["ERROR"]:
|
if result_status in ["ERROR"]:
|
||||||
dump_known_auction_requests(state_file, [AttrDict({"id": r.id, "revealFile": reveal_file_path})], result_status)
|
dump_known_auction_requests(
|
||||||
|
state_file,
|
||||||
|
[AttrDict({"id": r.id, "revealFile": reveal_file_path})],
|
||||||
|
result_status,
|
||||||
|
)
|
||||||
|
|
||||||
logger.log(f"Found {len(requests_to_execute)} request(s) to process.")
|
logger.log(f"Found {len(requests_to_execute)} request(s) to process.")
|
||||||
|
|
||||||
|
|
@ -214,7 +239,11 @@ def command(
|
||||||
logger.log(f"ERROR {r.id}:" + str(e))
|
logger.log(f"ERROR {r.id}:" + str(e))
|
||||||
finally:
|
finally:
|
||||||
logger.log(f"Processing {r.id}: END - {result_status}")
|
logger.log(f"Processing {r.id}: END - {result_status}")
|
||||||
dump_known_auction_requests(state_file, [AttrDict({"id": r.id, "revealFile": reveal_file_path})], result_status)
|
dump_known_auction_requests(
|
||||||
|
state_file,
|
||||||
|
[AttrDict({"id": r.id, "revealFile": reveal_file_path})],
|
||||||
|
result_status,
|
||||||
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.log("UNCAUGHT ERROR:" + str(e))
|
logger.log("UNCAUGHT ERROR:" + str(e))
|
||||||
raise e
|
raise e
|
||||||
|
|
|
||||||
|
|
@ -17,7 +17,7 @@ def acquire_lock(client, lock_file_path, timeout):
|
||||||
try:
|
try:
|
||||||
# Check if lock file exists and is potentially stale
|
# Check if lock file exists and is potentially stale
|
||||||
if os.path.exists(lock_file_path):
|
if os.path.exists(lock_file_path):
|
||||||
with open(lock_file_path, 'r') as lock_file:
|
with open(lock_file_path, "r") as lock_file:
|
||||||
timestamp = float(lock_file.read().strip())
|
timestamp = float(lock_file.read().strip())
|
||||||
|
|
||||||
# If lock is stale, remove the lock file
|
# If lock is stale, remove the lock file
|
||||||
|
|
@ -25,13 +25,15 @@ def acquire_lock(client, lock_file_path, timeout):
|
||||||
print(f"Stale lock detected, removing lock file {lock_file_path}")
|
print(f"Stale lock detected, removing lock file {lock_file_path}")
|
||||||
os.remove(lock_file_path)
|
os.remove(lock_file_path)
|
||||||
else:
|
else:
|
||||||
print(f"Lock file {lock_file_path} exists and is recent, waiting...")
|
print(
|
||||||
|
f"Lock file {lock_file_path} exists and is recent, waiting..."
|
||||||
|
)
|
||||||
time.sleep(LOCK_RETRY_INTERVAL)
|
time.sleep(LOCK_RETRY_INTERVAL)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Try to create a new lock file with the current timestamp
|
# Try to create a new lock file with the current timestamp
|
||||||
fd = os.open(lock_file_path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
|
fd = os.open(lock_file_path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
|
||||||
with os.fdopen(fd, 'w') as lock_file:
|
with os.fdopen(fd, "w") as lock_file:
|
||||||
lock_file.write(str(time.time()))
|
lock_file.write(str(time.time()))
|
||||||
|
|
||||||
client.mutex_lock_acquired = True
|
client.mutex_lock_acquired = True
|
||||||
|
|
|
||||||
|
|
@ -57,7 +57,10 @@ def fatal(msg: str):
|
||||||
@click.option("--config-ref", help="The ref of an existing config upload to use.")
|
@click.option("--config-ref", help="The ref of an existing config upload to use.")
|
||||||
@click.option(
|
@click.option(
|
||||||
"--make-payment",
|
"--make-payment",
|
||||||
help="The payment to make (in alnt). The value should be a number or 'auto' to use the deployer's minimum required payment.",
|
help=(
|
||||||
|
"The payment to make (in alnt). The value should be a number or "
|
||||||
|
"'auto' to use the deployer's minimum required payment."
|
||||||
|
),
|
||||||
)
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--use-payment", help="The TX id of an existing, unused payment", default=None
|
"--use-payment", help="The TX id of an existing, unused payment", default=None
|
||||||
|
|
@ -91,7 +94,10 @@ def command( # noqa: C901
|
||||||
sys.exit(2)
|
sys.exit(2)
|
||||||
|
|
||||||
if auction_id and (make_payment or use_payment):
|
if auction_id and (make_payment or use_payment):
|
||||||
print("Cannot specify --auction-id with --make-payment or --use-payment", file=sys.stderr)
|
print(
|
||||||
|
"Cannot specify --auction-id with --make-payment or --use-payment",
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
sys.exit(2)
|
sys.exit(2)
|
||||||
|
|
||||||
if env_file and config_ref:
|
if env_file and config_ref:
|
||||||
|
|
@ -117,7 +123,10 @@ def command( # noqa: C901
|
||||||
# Cross check app against application in the auction record
|
# Cross check app against application in the auction record
|
||||||
auction_app = auction_records_by_id[0].attributes.application
|
auction_app = auction_records_by_id[0].attributes.application
|
||||||
if auction_app != app:
|
if auction_app != app:
|
||||||
fatal(f"Requested application {app} does not match application from auction record {auction_app}")
|
fatal(
|
||||||
|
f"Requested application {app} does not match application "
|
||||||
|
f"from auction record {auction_app}"
|
||||||
|
)
|
||||||
|
|
||||||
# Fetch auction details
|
# Fetch auction details
|
||||||
auction = laconic.get_auction(auction_id)
|
auction = laconic.get_auction(auction_id)
|
||||||
|
|
@ -130,7 +139,9 @@ def command( # noqa: C901
|
||||||
|
|
||||||
# Check auction kind
|
# Check auction kind
|
||||||
if auction.kind != AUCTION_KIND_PROVIDER:
|
if auction.kind != AUCTION_KIND_PROVIDER:
|
||||||
fatal(f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction.kind}")
|
fatal(
|
||||||
|
f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction.kind}"
|
||||||
|
)
|
||||||
|
|
||||||
# Check auction status
|
# Check auction status
|
||||||
if auction.status != AuctionStatus.COMPLETED:
|
if auction.status != AuctionStatus.COMPLETED:
|
||||||
|
|
@ -145,9 +156,14 @@ def command( # noqa: C901
|
||||||
# Get deployer record for all the auction winners
|
# Get deployer record for all the auction winners
|
||||||
for auction_winner in auction_winners:
|
for auction_winner in auction_winners:
|
||||||
# TODO: Match auction winner address with provider address?
|
# TODO: Match auction winner address with provider address?
|
||||||
deployer_records_by_owner = laconic.webapp_deployers({"paymentAddress": auction_winner})
|
deployer_records_by_owner = laconic.webapp_deployers(
|
||||||
|
{"paymentAddress": auction_winner}
|
||||||
|
)
|
||||||
if len(deployer_records_by_owner) == 0:
|
if len(deployer_records_by_owner) == 0:
|
||||||
print(f"WARNING: Unable to locate deployer for auction winner {auction_winner}")
|
print(
|
||||||
|
f"WARNING: Unable to locate deployer for auction winner "
|
||||||
|
f"{auction_winner}"
|
||||||
|
)
|
||||||
|
|
||||||
# Take first record with name set
|
# Take first record with name set
|
||||||
target_deployer_record = deployer_records_by_owner[0]
|
target_deployer_record = deployer_records_by_owner[0]
|
||||||
|
|
|
||||||
|
|
@ -17,7 +17,7 @@ import sys
|
||||||
import click
|
import click
|
||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient)
|
from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient
|
||||||
|
|
||||||
|
|
||||||
def fatal(msg: str):
|
def fatal(msg: str):
|
||||||
|
|
@ -30,18 +30,19 @@ def fatal(msg: str):
|
||||||
"--laconic-config", help="Provide a config file for laconicd", required=True
|
"--laconic-config", help="Provide a config file for laconicd", required=True
|
||||||
)
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--deployer",
|
"--deployer", help="The LRN of the deployer to process this request.", required=True
|
||||||
help="The LRN of the deployer to process this request.",
|
|
||||||
required=True
|
|
||||||
)
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--deployment",
|
"--deployment",
|
||||||
help="Deployment record (ApplicationDeploymentRecord) id of the deployment to remove.",
|
help="Deployment record (ApplicationDeploymentRecord) id of the deployment.",
|
||||||
required=True,
|
required=True,
|
||||||
)
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--make-payment",
|
"--make-payment",
|
||||||
help="The payment to make (in alnt). The value should be a number or 'auto' to use the deployer's minimum required payment.",
|
help=(
|
||||||
|
"The payment to make (in alnt). The value should be a number or "
|
||||||
|
"'auto' to use the deployer's minimum required payment."
|
||||||
|
),
|
||||||
)
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--use-payment", help="The TX id of an existing, unused payment", default=None
|
"--use-payment", help="The TX id of an existing, unused payment", default=None
|
||||||
|
|
|
||||||
|
|
@ -18,7 +18,8 @@
|
||||||
# env vars:
|
# env vars:
|
||||||
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
||||||
|
|
||||||
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
# TODO: display the available list of containers; allow re-build of either
|
||||||
|
# all or specific containers
|
||||||
|
|
||||||
import hashlib
|
import hashlib
|
||||||
import click
|
import click
|
||||||
|
|
@ -36,7 +37,7 @@ WEBAPP_PORT = 80
|
||||||
@click.option("--port", help="port to use (default random)")
|
@click.option("--port", help="port to use (default random)")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, image, env_file, port):
|
def command(ctx, image, env_file, port):
|
||||||
'''run the specified webapp container'''
|
"""run the specified webapp container"""
|
||||||
|
|
||||||
env = {}
|
env = {}
|
||||||
if env_file:
|
if env_file:
|
||||||
|
|
@ -46,20 +47,35 @@ def command(ctx, image, env_file, port):
|
||||||
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
|
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
|
||||||
cluster = f"laconic-webapp-{hash}"
|
cluster = f"laconic-webapp-{hash}"
|
||||||
|
|
||||||
deployer = getDeployer(type=constants.compose_deploy_type,
|
deployer = getDeployer(
|
||||||
|
type=constants.compose_deploy_type,
|
||||||
deployment_context=None,
|
deployment_context=None,
|
||||||
compose_files=None,
|
compose_files=None,
|
||||||
compose_project_name=cluster,
|
compose_project_name=cluster,
|
||||||
compose_env_file=None)
|
compose_env_file=None,
|
||||||
|
)
|
||||||
|
|
||||||
ports = []
|
ports = []
|
||||||
if port:
|
if port:
|
||||||
ports = [(port, WEBAPP_PORT)]
|
ports = [(port, WEBAPP_PORT)]
|
||||||
container = deployer.run(image, command=[], user=None, volumes=[], entrypoint=None, env=env, ports=ports, detach=True)
|
container = deployer.run(
|
||||||
|
image,
|
||||||
|
command=[],
|
||||||
|
user=None,
|
||||||
|
volumes=[],
|
||||||
|
entrypoint=None,
|
||||||
|
env=env,
|
||||||
|
ports=ports,
|
||||||
|
detach=True,
|
||||||
|
)
|
||||||
|
|
||||||
# Make configurable?
|
# Make configurable?
|
||||||
webappPort = f"{WEBAPP_PORT}/tcp"
|
webappPort = f"{WEBAPP_PORT}/tcp"
|
||||||
# TODO: This assumes a Docker container object...
|
# TODO: This assumes a Docker container object...
|
||||||
if webappPort in container.network_settings.ports:
|
if webappPort in container.network_settings.ports:
|
||||||
mapping = container.network_settings.ports[webappPort][0]
|
mapping = container.network_settings.ports[webappPort][0]
|
||||||
print(f"""Image: {image}\nID: {container.id}\nURL: http://localhost:{mapping['HostPort']}""")
|
print(
|
||||||
|
f"Image: {image}\n"
|
||||||
|
f"ID: {container.id}\n"
|
||||||
|
f"URL: http://localhost:{mapping['HostPort']}"
|
||||||
|
)
|
||||||
|
|
|
||||||
|
|
@ -51,7 +51,8 @@ def process_app_removal_request(
|
||||||
if not os.path.exists(deployment_dir):
|
if not os.path.exists(deployment_dir):
|
||||||
raise Exception("Deployment directory %s does not exist." % deployment_dir)
|
raise Exception("Deployment directory %s does not exist." % deployment_dir)
|
||||||
|
|
||||||
# Check if the removal request is from the owner of the DnsRecord or deployment record.
|
# Check if the removal request is from the owner of the DnsRecord or
|
||||||
|
# deployment record.
|
||||||
matched_owner = match_owner(app_removal_request, deployment_record, dns_record)
|
matched_owner = match_owner(app_removal_request, deployment_record, dns_record)
|
||||||
|
|
||||||
# Or of the original deployment request.
|
# Or of the original deployment request.
|
||||||
|
|
@ -69,9 +70,10 @@ def process_app_removal_request(
|
||||||
% (deployment_record.id, app_removal_request.id)
|
% (deployment_record.id, app_removal_request.id)
|
||||||
)
|
)
|
||||||
|
|
||||||
# TODO(telackey): Call the function directly. The easiest way to build the correct click context is to
|
# TODO(telackey): Call the function directly. The easiest way to build
|
||||||
# exec the process, but it would be better to refactor so we could just call down_operation with the
|
# the correct click context is to exec the process, but it would be better
|
||||||
# necessary parameters
|
# to refactor so we could just call down_operation with the necessary
|
||||||
|
# parameters
|
||||||
down_command = [sys.argv[0], "deployment", "--dir", deployment_dir, "down"]
|
down_command = [sys.argv[0], "deployment", "--dir", deployment_dir, "down"]
|
||||||
if delete_volumes:
|
if delete_volumes:
|
||||||
down_command.append("--delete-volumes")
|
down_command.append("--delete-volumes")
|
||||||
|
|
@ -179,7 +181,9 @@ def dump_known_requests(filename, requests):
|
||||||
is_flag=True,
|
is_flag=True,
|
||||||
)
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--registry-lock-file", help="File path to use for registry mutex lock", default=None
|
"--registry-lock-file",
|
||||||
|
help="File path to use for registry mutex lock",
|
||||||
|
default=None,
|
||||||
)
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command( # noqa: C901
|
def command( # noqa: C901
|
||||||
|
|
@ -216,14 +220,17 @@ def command( # noqa: C901
|
||||||
include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
|
include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
|
||||||
exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
|
exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
|
||||||
|
|
||||||
laconic = LaconicRegistryClient(laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file)
|
laconic = LaconicRegistryClient(
|
||||||
|
laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file
|
||||||
|
)
|
||||||
deployer_record = laconic.get_record(lrn, require=True)
|
deployer_record = laconic.get_record(lrn, require=True)
|
||||||
payment_address = deployer_record.attributes.paymentAddress
|
payment_address = deployer_record.attributes.paymentAddress
|
||||||
main_logger.log(f"Payment address: {payment_address}")
|
main_logger.log(f"Payment address: {payment_address}")
|
||||||
|
|
||||||
if min_required_payment and not payment_address:
|
if min_required_payment and not payment_address:
|
||||||
print(
|
print(
|
||||||
f"Minimum payment required, but no payment address listed for deployer: {lrn}.",
|
f"Minimum payment required, but no payment address listed "
|
||||||
|
f"for deployer: {lrn}.",
|
||||||
file=sys.stderr,
|
file=sys.stderr,
|
||||||
)
|
)
|
||||||
sys.exit(2)
|
sys.exit(2)
|
||||||
|
|
@ -286,21 +293,25 @@ def command( # noqa: C901
|
||||||
try:
|
try:
|
||||||
if r.attributes.deployment not in named_deployments:
|
if r.attributes.deployment not in named_deployments:
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Skipping removal request {r.id} for {r.attributes.deployment} because it does"
|
f"Skipping removal request {r.id} for "
|
||||||
f"not appear to refer to a live, named deployment."
|
f"{r.attributes.deployment} because it does not appear to "
|
||||||
|
"refer to a live, named deployment."
|
||||||
)
|
)
|
||||||
elif skip_by_tag(r, include_tags, exclude_tags):
|
elif skip_by_tag(r, include_tags, exclude_tags):
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
"Skipping removal request %s, filtered by tag (include %s, exclude %s, present %s)"
|
"Skipping removal request %s, filtered by tag "
|
||||||
|
"(include %s, exclude %s, present %s)"
|
||||||
% (r.id, include_tags, exclude_tags, r.attributes.tags)
|
% (r.id, include_tags, exclude_tags, r.attributes.tags)
|
||||||
)
|
)
|
||||||
elif r.id in removals_by_request:
|
elif r.id in removals_by_request:
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Found satisfied request for {r.id} at {removals_by_request[r.id].id}"
|
f"Found satisfied request for {r.id} "
|
||||||
|
f"at {removals_by_request[r.id].id}"
|
||||||
)
|
)
|
||||||
elif r.attributes.deployment in removals_by_deployment:
|
elif r.attributes.deployment in removals_by_deployment:
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Found removal record for indicated deployment {r.attributes.deployment} at "
|
f"Found removal record for indicated deployment "
|
||||||
|
f"{r.attributes.deployment} at "
|
||||||
f"{removals_by_deployment[r.attributes.deployment].id}"
|
f"{removals_by_deployment[r.attributes.deployment].id}"
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
|
|
@ -309,7 +320,8 @@ def command( # noqa: C901
|
||||||
requests_to_check_for_payment.append(r)
|
requests_to_check_for_payment.append(r)
|
||||||
else:
|
else:
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Skipping unsatisfied request {r.id} because we have seen it before."
|
f"Skipping unsatisfied request {r.id} "
|
||||||
|
"because we have seen it before."
|
||||||
)
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
main_logger.log(f"ERROR examining {r.id}: {e}")
|
main_logger.log(f"ERROR examining {r.id}: {e}")
|
||||||
|
|
|
||||||
|
|
@ -497,7 +497,7 @@ class LaconicRegistryClient:
|
||||||
"--max-price",
|
"--max-price",
|
||||||
str(auction["max_price"]),
|
str(auction["max_price"]),
|
||||||
"--num-providers",
|
"--num-providers",
|
||||||
str(auction["num_providers"])
|
str(auction["num_providers"]),
|
||||||
]
|
]
|
||||||
|
|
||||||
return json.loads(logged_cmd(self.log_file, *args))["auctionId"]
|
return json.loads(logged_cmd(self.log_file, *args))["auctionId"]
|
||||||
|
|
@ -561,7 +561,8 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None):
|
||||||
extra_build_args = []
|
extra_build_args = []
|
||||||
tmpdir = tempfile.mkdtemp()
|
tmpdir = tempfile.mkdtemp()
|
||||||
|
|
||||||
# TODO: determine if this code could be calling into the Python git library like setup-repositories
|
# TODO: determine if this code could be calling into the Python git
|
||||||
|
# library like setup-repositories
|
||||||
try:
|
try:
|
||||||
record_id = app_record["id"]
|
record_id = app_record["id"]
|
||||||
ref = app_record.attributes.repository_ref
|
ref = app_record.attributes.repository_ref
|
||||||
|
|
@ -570,7 +571,8 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None):
|
||||||
|
|
||||||
logger.log(f"Cloning repository {repo} to {clone_dir} ...")
|
logger.log(f"Cloning repository {repo} to {clone_dir} ...")
|
||||||
# Set github credentials if present running a command like:
|
# Set github credentials if present running a command like:
|
||||||
# git config --global url."https://${TOKEN}:@github.com/".insteadOf "https://github.com/"
|
# git config --global url."https://${TOKEN}:@github.com/".insteadOf
|
||||||
|
# "https://github.com/"
|
||||||
github_token = os.environ.get("DEPLOYER_GITHUB_TOKEN")
|
github_token = os.environ.get("DEPLOYER_GITHUB_TOKEN")
|
||||||
if github_token:
|
if github_token:
|
||||||
logger.log("Github token detected, setting it in the git environment")
|
logger.log("Github token detected, setting it in the git environment")
|
||||||
|
|
@ -612,7 +614,8 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None):
|
||||||
logger.log(f"git checkout failed. Does ref {ref} exist?")
|
logger.log(f"git checkout failed. Does ref {ref} exist?")
|
||||||
raise e
|
raise e
|
||||||
else:
|
else:
|
||||||
# TODO: why is this code different vs the branch above (run vs check_call, and no prompt disable)?
|
# TODO: why is this code different vs the branch above (run vs check_call,
|
||||||
|
# and no prompt disable)?
|
||||||
result = subprocess.run(
|
result = subprocess.run(
|
||||||
["git", "clone", "--depth", "1", repo, clone_dir],
|
["git", "clone", "--depth", "1", repo, clone_dir],
|
||||||
stdout=logger.file,
|
stdout=logger.file,
|
||||||
|
|
@ -749,9 +752,13 @@ def publish_deployment(
|
||||||
|
|
||||||
# Set auction or payment id from request
|
# Set auction or payment id from request
|
||||||
if app_deployment_request.attributes.auction:
|
if app_deployment_request.attributes.auction:
|
||||||
new_deployment_record["record"]["auction"] = app_deployment_request.attributes.auction
|
new_deployment_record["record"][
|
||||||
|
"auction"
|
||||||
|
] = app_deployment_request.attributes.auction
|
||||||
elif app_deployment_request.attributes.payment:
|
elif app_deployment_request.attributes.payment:
|
||||||
new_deployment_record["record"]["payment"] = app_deployment_request.attributes.payment
|
new_deployment_record["record"][
|
||||||
|
"payment"
|
||||||
|
] = app_deployment_request.attributes.payment
|
||||||
|
|
||||||
if webapp_deployer_record:
|
if webapp_deployer_record:
|
||||||
new_deployment_record["record"]["deployer"] = webapp_deployer_record.names[0]
|
new_deployment_record["record"]["deployer"] = webapp_deployer_record.names[0]
|
||||||
|
|
@ -801,7 +808,9 @@ def skip_by_tag(r, include_tags, exclude_tags):
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def confirm_payment(laconic: LaconicRegistryClient, record, payment_address, min_amount, logger):
|
def confirm_payment(
|
||||||
|
laconic: LaconicRegistryClient, record, payment_address, min_amount, logger
|
||||||
|
):
|
||||||
req_owner = laconic.get_owner(record)
|
req_owner = laconic.get_owner(record)
|
||||||
if req_owner == payment_address:
|
if req_owner == payment_address:
|
||||||
# No need to confirm payment if the sender and recipient are the same account.
|
# No need to confirm payment if the sender and recipient are the same account.
|
||||||
|
|
@ -818,27 +827,30 @@ def confirm_payment(laconic: LaconicRegistryClient, record, payment_address, min
|
||||||
|
|
||||||
if tx.code != 0:
|
if tx.code != 0:
|
||||||
logger.log(
|
logger.log(
|
||||||
f"{record.id}: payment tx {tx.hash} was not successful - code: {tx.code}, log: {tx.log}"
|
f"{record.id}: payment tx {tx.hash} was not successful - "
|
||||||
|
f"code: {tx.code}, log: {tx.log}"
|
||||||
)
|
)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
if tx.sender != req_owner:
|
if tx.sender != req_owner:
|
||||||
logger.log(
|
logger.log(
|
||||||
f"{record.id}: payment sender {tx.sender} in tx {tx.hash} does not match deployment "
|
f"{record.id}: payment sender {tx.sender} in tx {tx.hash} "
|
||||||
f"request owner {req_owner}"
|
f"does not match deployment request owner {req_owner}"
|
||||||
)
|
)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
if tx.recipient != payment_address:
|
if tx.recipient != payment_address:
|
||||||
logger.log(
|
logger.log(
|
||||||
f"{record.id}: payment recipient {tx.recipient} in tx {tx.hash} does not match {payment_address}"
|
f"{record.id}: payment recipient {tx.recipient} in tx {tx.hash} "
|
||||||
|
f"does not match {payment_address}"
|
||||||
)
|
)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
pay_denom = "".join([i for i in tx.amount if not i.isdigit()])
|
pay_denom = "".join([i for i in tx.amount if not i.isdigit()])
|
||||||
if pay_denom != "alnt":
|
if pay_denom != "alnt":
|
||||||
logger.log(
|
logger.log(
|
||||||
f"{record.id}: {pay_denom} in tx {tx.hash} is not an expected payment denomination"
|
f"{record.id}: {pay_denom} in tx {tx.hash} is not an expected "
|
||||||
|
"payment denomination"
|
||||||
)
|
)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
@ -859,7 +871,10 @@ def confirm_payment(laconic: LaconicRegistryClient, record, payment_address, min
|
||||||
|
|
||||||
# Check that payment was used for deployment of same application
|
# Check that payment was used for deployment of same application
|
||||||
if record.attributes.application != used_request.attributes.application:
|
if record.attributes.application != used_request.attributes.application:
|
||||||
logger.log(f"{record.id}: payment {tx.hash} already used on a different application deployment {used}")
|
logger.log(
|
||||||
|
f"{record.id}: payment {tx.hash} already used on a different "
|
||||||
|
f"application deployment {used}"
|
||||||
|
)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
used = laconic.app_deployment_removals(
|
used = laconic.app_deployment_removals(
|
||||||
|
|
@ -874,7 +889,9 @@ def confirm_payment(laconic: LaconicRegistryClient, record, payment_address, min
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
def confirm_auction(laconic: LaconicRegistryClient, record, deployer_lrn, payment_address, logger):
|
def confirm_auction(
|
||||||
|
laconic: LaconicRegistryClient, record, deployer_lrn, payment_address, logger
|
||||||
|
):
|
||||||
auction_id = record.attributes.auction
|
auction_id = record.attributes.auction
|
||||||
auction = laconic.get_auction(auction_id)
|
auction = laconic.get_auction(auction_id)
|
||||||
|
|
||||||
|
|
@ -886,11 +903,14 @@ def confirm_auction(laconic: LaconicRegistryClient, record, deployer_lrn, paymen
|
||||||
|
|
||||||
# Cross check app against application in the auction record
|
# Cross check app against application in the auction record
|
||||||
requested_app = laconic.get_record(record.attributes.application, require=True)
|
requested_app = laconic.get_record(record.attributes.application, require=True)
|
||||||
auction_app = laconic.get_record(auction_records_by_id[0].attributes.application, require=True)
|
auction_app = laconic.get_record(
|
||||||
|
auction_records_by_id[0].attributes.application, require=True
|
||||||
|
)
|
||||||
if requested_app.id != auction_app.id:
|
if requested_app.id != auction_app.id:
|
||||||
logger.log(
|
logger.log(
|
||||||
f"{record.id}: requested application {record.attributes.application} does not match application from "
|
f"{record.id}: requested application {record.attributes.application} "
|
||||||
f"auction record {auction_records_by_id[0].attributes.application}"
|
f"does not match application from auction record "
|
||||||
|
f"{auction_records_by_id[0].attributes.application}"
|
||||||
)
|
)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -21,7 +21,8 @@ from stack_orchestrator.repos import fetch_stack
|
||||||
from stack_orchestrator.build import build_containers, fetch_containers
|
from stack_orchestrator.build import build_containers, fetch_containers
|
||||||
from stack_orchestrator.build import build_npms
|
from stack_orchestrator.build import build_npms
|
||||||
from stack_orchestrator.build import build_webapp
|
from stack_orchestrator.build import build_webapp
|
||||||
from stack_orchestrator.deploy.webapp import (run_webapp,
|
from stack_orchestrator.deploy.webapp import (
|
||||||
|
run_webapp,
|
||||||
deploy_webapp,
|
deploy_webapp,
|
||||||
deploy_webapp_from_registry,
|
deploy_webapp_from_registry,
|
||||||
undeploy_webapp_from_registry,
|
undeploy_webapp_from_registry,
|
||||||
|
|
@ -29,29 +30,32 @@ from stack_orchestrator.deploy.webapp import (run_webapp,
|
||||||
publish_deployment_auction,
|
publish_deployment_auction,
|
||||||
handle_deployment_auction,
|
handle_deployment_auction,
|
||||||
request_webapp_deployment,
|
request_webapp_deployment,
|
||||||
request_webapp_undeployment)
|
request_webapp_undeployment,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy import deploy
|
from stack_orchestrator.deploy import deploy
|
||||||
from stack_orchestrator import version
|
from stack_orchestrator import version
|
||||||
from stack_orchestrator.deploy import deployment
|
from stack_orchestrator.deploy import deployment
|
||||||
from stack_orchestrator import opts
|
from stack_orchestrator import opts
|
||||||
from stack_orchestrator import update
|
from stack_orchestrator import update
|
||||||
|
|
||||||
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
|
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
|
||||||
|
|
||||||
|
|
||||||
@click.group(context_settings=CONTEXT_SETTINGS)
|
@click.group(context_settings=CONTEXT_SETTINGS)
|
||||||
@click.option('--stack', help="specify a stack to build/deploy")
|
@click.option("--stack", help="specify a stack to build/deploy")
|
||||||
@click.option('--quiet', is_flag=True, default=False)
|
@click.option("--quiet", is_flag=True, default=False)
|
||||||
@click.option('--verbose', is_flag=True, default=False)
|
@click.option("--verbose", is_flag=True, default=False)
|
||||||
@click.option('--dry-run', is_flag=True, default=False)
|
@click.option("--dry-run", is_flag=True, default=False)
|
||||||
@click.option('--local-stack', is_flag=True, default=False)
|
@click.option("--local-stack", is_flag=True, default=False)
|
||||||
@click.option('--debug', is_flag=True, default=False)
|
@click.option("--debug", is_flag=True, default=False)
|
||||||
@click.option('--continue-on-error', is_flag=True, default=False)
|
@click.option("--continue-on-error", is_flag=True, default=False)
|
||||||
# See: https://click.palletsprojects.com/en/8.1.x/complex/#building-a-git-clone
|
# See: https://click.palletsprojects.com/en/8.1.x/complex/#building-a-git-clone
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error):
|
def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error):
|
||||||
"""Laconic Stack Orchestrator"""
|
"""Laconic Stack Orchestrator"""
|
||||||
command_options = CommandOptions(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error)
|
command_options = CommandOptions(
|
||||||
|
stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error
|
||||||
|
)
|
||||||
opts.opts.o = command_options
|
opts.opts.o = command_options
|
||||||
ctx.obj = command_options
|
ctx.obj = command_options
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -29,13 +29,13 @@ from stack_orchestrator.util import error_exit
|
||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.argument('stack-locator')
|
@click.argument("stack-locator")
|
||||||
@click.option('--git-ssh', is_flag=True, default=False)
|
@click.option("--git-ssh", is_flag=True, default=False)
|
||||||
@click.option('--check-only', is_flag=True, default=False)
|
@click.option("--check-only", is_flag=True, default=False)
|
||||||
@click.option('--pull', is_flag=True, default=False)
|
@click.option("--pull", is_flag=True, default=False)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, stack_locator, git_ssh, check_only, pull):
|
def command(ctx, stack_locator, git_ssh, check_only, pull):
|
||||||
'''optionally resolve then git clone a repository containing one or more stack definitions'''
|
"""Optionally resolve then git clone a repository with stack definitions."""
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f"Dev Root is: {dev_root_path}")
|
print(f"Dev Root is: {dev_root_path}")
|
||||||
|
|
|
||||||
|
|
@ -25,15 +25,20 @@ from tqdm import tqdm
|
||||||
import click
|
import click
|
||||||
import importlib.resources
|
import importlib.resources
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
from stack_orchestrator.util import get_parsed_stack_config, include_exclude_check, error_exit, warn_exit
|
from stack_orchestrator.util import (
|
||||||
|
get_parsed_stack_config,
|
||||||
|
include_exclude_check,
|
||||||
|
error_exit,
|
||||||
|
warn_exit,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class GitProgress(git.RemoteProgress):
|
class GitProgress(git.RemoteProgress):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.pbar = tqdm(unit='B', ascii=True, unit_scale=True)
|
self.pbar = tqdm(unit="B", ascii=True, unit_scale=True)
|
||||||
|
|
||||||
def update(self, op_code, cur_count, max_count=None, message=''):
|
def update(self, op_code, cur_count, max_count=None, message=""):
|
||||||
self.pbar.total = max_count
|
self.pbar.total = max_count
|
||||||
self.pbar.n = cur_count
|
self.pbar.n = cur_count
|
||||||
self.pbar.refresh()
|
self.pbar.refresh()
|
||||||
|
|
@ -46,14 +51,16 @@ def is_git_repo(path):
|
||||||
except git.exc.InvalidGitRepositoryError:
|
except git.exc.InvalidGitRepositoryError:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
# TODO: find a place for this in the context of click
|
# TODO: find a place for this in the context of click
|
||||||
# parser = argparse.ArgumentParser(
|
# parser = argparse.ArgumentParser(
|
||||||
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
# epilog="Config provided either in .env or settings.ini or env vars: "
|
||||||
|
# "CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
||||||
# )
|
# )
|
||||||
|
|
||||||
|
|
||||||
def branch_strip(s):
|
def branch_strip(s):
|
||||||
return s.split('@')[0]
|
return s.split("@")[0]
|
||||||
|
|
||||||
|
|
||||||
def host_and_path_for_repo(fully_qualified_repo):
|
def host_and_path_for_repo(fully_qualified_repo):
|
||||||
|
|
@ -74,43 +81,64 @@ def _get_repo_current_branch_or_tag(full_filesystem_repo_path):
|
||||||
current_repo_branch_or_tag = "***UNDETERMINED***"
|
current_repo_branch_or_tag = "***UNDETERMINED***"
|
||||||
is_branch = False
|
is_branch = False
|
||||||
try:
|
try:
|
||||||
current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).active_branch.name
|
current_repo_branch_or_tag = git.Repo(
|
||||||
|
full_filesystem_repo_path
|
||||||
|
).active_branch.name
|
||||||
is_branch = True
|
is_branch = True
|
||||||
except TypeError:
|
except TypeError:
|
||||||
# This means that the current ref is not a branch, so possibly a tag
|
# This means that the current ref is not a branch, so possibly a tag
|
||||||
# Let's try to get the tag
|
# Let's try to get the tag
|
||||||
try:
|
try:
|
||||||
current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).git.describe("--tags", "--exact-match")
|
current_repo_branch_or_tag = git.Repo(
|
||||||
# Note that git is asymmetric -- the tag you told it to check out may not be the one
|
full_filesystem_repo_path
|
||||||
# you get back here (if there are multiple tags associated with the same commit)
|
).git.describe("--tags", "--exact-match")
|
||||||
|
# Note that git is asymmetric -- the tag you told it to check out
|
||||||
|
# may not be the one you get back here (if there are multiple tags
|
||||||
|
# associated with the same commit)
|
||||||
except GitCommandError:
|
except GitCommandError:
|
||||||
# If there is no matching branch or tag checked out, just use the current SHA
|
# If there is no matching branch or tag checked out, just use the current
|
||||||
current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).commit("HEAD").hexsha
|
# SHA
|
||||||
|
current_repo_branch_or_tag = (
|
||||||
|
git.Repo(full_filesystem_repo_path).commit("HEAD").hexsha
|
||||||
|
)
|
||||||
return current_repo_branch_or_tag, is_branch
|
return current_repo_branch_or_tag, is_branch
|
||||||
|
|
||||||
|
|
||||||
# TODO: fix the messy arg list here
|
# TODO: fix the messy arg list here
|
||||||
def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
|
def process_repo(
|
||||||
|
pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo
|
||||||
|
):
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Processing repo: {fully_qualified_repo}")
|
print(f"Processing repo: {fully_qualified_repo}")
|
||||||
repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
|
repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
|
||||||
git_ssh_prefix = f"git@{repo_host}:"
|
git_ssh_prefix = f"git@{repo_host}:"
|
||||||
git_http_prefix = f"https://{repo_host}/"
|
git_http_prefix = f"https://{repo_host}/"
|
||||||
full_github_repo_path = f"{git_ssh_prefix if git_ssh else git_http_prefix}{repo_path}"
|
full_github_repo_path = (
|
||||||
|
f"{git_ssh_prefix if git_ssh else git_http_prefix}{repo_path}"
|
||||||
|
)
|
||||||
repoName = repo_path.split("/")[-1]
|
repoName = repo_path.split("/")[-1]
|
||||||
full_filesystem_repo_path = os.path.join(dev_root_path, repoName)
|
full_filesystem_repo_path = os.path.join(dev_root_path, repoName)
|
||||||
is_present = os.path.isdir(full_filesystem_repo_path)
|
is_present = os.path.isdir(full_filesystem_repo_path)
|
||||||
(current_repo_branch_or_tag, is_branch) = _get_repo_current_branch_or_tag(
|
(current_repo_branch_or_tag, is_branch) = (
|
||||||
full_filesystem_repo_path
|
_get_repo_current_branch_or_tag(full_filesystem_repo_path)
|
||||||
) if is_present else (None, None)
|
if is_present
|
||||||
|
else (None, None)
|
||||||
|
)
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
present_text = f"already exists active {'branch' if is_branch else 'ref'}: {current_repo_branch_or_tag}" if is_present \
|
present_text = (
|
||||||
else 'Needs to be fetched'
|
f"already exists active {'branch' if is_branch else 'ref'}: "
|
||||||
|
f"{current_repo_branch_or_tag}"
|
||||||
|
if is_present
|
||||||
|
else "Needs to be fetched"
|
||||||
|
)
|
||||||
print(f"Checking: {full_filesystem_repo_path}: {present_text}")
|
print(f"Checking: {full_filesystem_repo_path}: {present_text}")
|
||||||
# Quick check that it's actually a repo
|
# Quick check that it's actually a repo
|
||||||
if is_present:
|
if is_present:
|
||||||
if not is_git_repo(full_filesystem_repo_path):
|
if not is_git_repo(full_filesystem_repo_path):
|
||||||
print(f"Error: {full_filesystem_repo_path} does not contain a valid git repository")
|
print(
|
||||||
|
f"Error: {full_filesystem_repo_path} does not contain "
|
||||||
|
"a valid git repository"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
if pull:
|
if pull:
|
||||||
|
|
@ -128,11 +156,16 @@ def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully
|
||||||
if not is_present:
|
if not is_present:
|
||||||
# Clone
|
# Clone
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
|
print(
|
||||||
|
f"Running git clone for {full_github_repo_path} "
|
||||||
|
f"into {full_filesystem_repo_path}"
|
||||||
|
)
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
git.Repo.clone_from(full_github_repo_path,
|
git.Repo.clone_from(
|
||||||
|
full_github_repo_path,
|
||||||
full_filesystem_repo_path,
|
full_filesystem_repo_path,
|
||||||
progress=None if opts.o.quiet else GitProgress())
|
progress=None if opts.o.quiet else GitProgress(),
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
print("(git clone skipped)")
|
print("(git clone skipped)")
|
||||||
# Checkout the requested branch, if one was specified
|
# Checkout the requested branch, if one was specified
|
||||||
|
|
@ -150,8 +183,8 @@ def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully
|
||||||
|
|
||||||
if branch_to_checkout:
|
if branch_to_checkout:
|
||||||
if current_repo_branch_or_tag is None or (
|
if current_repo_branch_or_tag is None or (
|
||||||
current_repo_branch_or_tag and (
|
current_repo_branch_or_tag
|
||||||
current_repo_branch_or_tag != branch_to_checkout)
|
and (current_repo_branch_or_tag != branch_to_checkout)
|
||||||
):
|
):
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
|
print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
|
||||||
|
|
@ -180,14 +213,14 @@ def parse_branches(branches_string):
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--include", help="only clone these repositories")
|
@click.option("--include", help="only clone these repositories")
|
||||||
@click.option("--exclude", help="don\'t clone these repositories")
|
@click.option("--exclude", help="don't clone these repositories")
|
||||||
@click.option('--git-ssh', is_flag=True, default=False)
|
@click.option("--git-ssh", is_flag=True, default=False)
|
||||||
@click.option('--check-only', is_flag=True, default=False)
|
@click.option("--check-only", is_flag=True, default=False)
|
||||||
@click.option('--pull', is_flag=True, default=False)
|
@click.option("--pull", is_flag=True, default=False)
|
||||||
@click.option("--branches", help="override branches for repositories")
|
@click.option("--branches", help="override branches for repositories")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
|
def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
|
||||||
'''git clone the set of repositories required to build the complete system from source'''
|
"""git clone the set of repositories required to build the system."""
|
||||||
|
|
||||||
quiet = opts.o.quiet
|
quiet = opts.o.quiet
|
||||||
verbose = opts.o.verbose
|
verbose = opts.o.verbose
|
||||||
|
|
@ -204,22 +237,30 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}")
|
print(
|
||||||
|
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
||||||
|
f"{dev_root_path}"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(
|
||||||
|
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
||||||
|
)
|
||||||
|
|
||||||
if not quiet:
|
if not quiet:
|
||||||
print(f"Dev Root is: {dev_root_path}")
|
print(f"Dev Root is: {dev_root_path}")
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
if not quiet:
|
if not quiet:
|
||||||
print('Dev root directory doesn\'t exist, creating')
|
print("Dev root directory doesn't exist, creating")
|
||||||
os.makedirs(dev_root_path)
|
os.makedirs(dev_root_path)
|
||||||
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
with importlib.resources.open_text(data, "repository-list.txt") as repository_list_file:
|
|
||||||
|
with importlib.resources.open_text(
|
||||||
|
data, "repository-list.txt"
|
||||||
|
) as repository_list_file:
|
||||||
all_repos = repository_list_file.read().splitlines()
|
all_repos = repository_list_file.read().splitlines()
|
||||||
|
|
||||||
repos_in_scope = []
|
repos_in_scope = []
|
||||||
|
|
|
||||||
|
|
@ -29,7 +29,7 @@ from stack_orchestrator.util import get_yaml
|
||||||
def _download_url(url: str, file_path: Path):
|
def _download_url(url: str, file_path: Path):
|
||||||
r = requests.get(url, stream=True)
|
r = requests.get(url, stream=True)
|
||||||
r.raw.decode_content = True
|
r.raw.decode_content = True
|
||||||
with open(file_path, 'wb') as f:
|
with open(file_path, "wb") as f:
|
||||||
shutil.copyfileobj(r.raw, f)
|
shutil.copyfileobj(r.raw, f)
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -40,12 +40,14 @@ def _error_exit(s: str):
|
||||||
|
|
||||||
# Note at present this probably won't work on non-Unix based OSes like Windows
|
# Note at present this probably won't work on non-Unix based OSes like Windows
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--check-only", is_flag=True, default=False, help="only check, don't update")
|
@click.option(
|
||||||
|
"--check-only", is_flag=True, default=False, help="only check, don't update"
|
||||||
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, check_only):
|
def command(ctx, check_only):
|
||||||
'''update shiv binary from a distribution url'''
|
"""update shiv binary from a distribution url"""
|
||||||
# Get the distribution URL from config
|
# Get the distribution URL from config
|
||||||
config_key = 'distribution-url'
|
config_key = "distribution-url"
|
||||||
config_file_path = Path(os.path.expanduser("~/.laconic-so/config.yml"))
|
config_file_path = Path(os.path.expanduser("~/.laconic-so/config.yml"))
|
||||||
if not config_file_path.exists():
|
if not config_file_path.exists():
|
||||||
_error_exit(f"Error: Config file: {config_file_path} not found")
|
_error_exit(f"Error: Config file: {config_file_path} not found")
|
||||||
|
|
@ -59,7 +61,9 @@ def command(ctx, check_only):
|
||||||
_error_exit(f"ERROR: distribution url: {distribution_url} is not valid")
|
_error_exit(f"ERROR: distribution url: {distribution_url} is not valid")
|
||||||
# Figure out the filename for ourselves
|
# Figure out the filename for ourselves
|
||||||
shiv_binary_path = Path(sys.argv[0])
|
shiv_binary_path = Path(sys.argv[0])
|
||||||
timestamp_filename = f"laconic-so-download-{datetime.datetime.now().strftime('%y%m%d-%H%M%S')}"
|
timestamp_filename = (
|
||||||
|
f"laconic-so-download-{datetime.datetime.now().strftime('%y%m%d-%H%M%S')}"
|
||||||
|
)
|
||||||
temp_download_path = shiv_binary_path.parent.joinpath(timestamp_filename)
|
temp_download_path = shiv_binary_path.parent.joinpath(timestamp_filename)
|
||||||
# Download the file to a temp filename
|
# Download the file to a temp filename
|
||||||
if ctx.obj.verbose:
|
if ctx.obj.verbose:
|
||||||
|
|
@ -87,4 +91,4 @@ def command(ctx, check_only):
|
||||||
print(f"Replacing: {shiv_binary_path} with {temp_download_path}")
|
print(f"Replacing: {shiv_binary_path} with {temp_download_path}")
|
||||||
os.replace(temp_download_path, shiv_binary_path)
|
os.replace(temp_download_path, shiv_binary_path)
|
||||||
if not ctx.obj.quiet:
|
if not ctx.obj.quiet:
|
||||||
print("Run \"laconic-so version\" to see the newly installed version")
|
print('Run "laconic-so version" to see the newly installed version')
|
||||||
|
|
|
||||||
|
|
@ -38,8 +38,10 @@ def get_stack_path(stack):
|
||||||
if stack_is_external(stack):
|
if stack_is_external(stack):
|
||||||
stack_path = Path(stack)
|
stack_path = Path(stack)
|
||||||
else:
|
else:
|
||||||
# In order to be compatible with Python 3.8 we need to use this hack to get the path:
|
# In order to be compatible with Python 3.8 we need to use this hack
|
||||||
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
# to get the path:
|
||||||
|
# See: https://stackoverflow.com/questions/25389095/
|
||||||
|
# python-get-path-of-root-project-structure
|
||||||
stack_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack)
|
stack_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack)
|
||||||
return stack_path
|
return stack_path
|
||||||
|
|
||||||
|
|
@ -47,10 +49,15 @@ def get_stack_path(stack):
|
||||||
def get_dev_root_path(ctx):
|
def get_dev_root_path(ctx):
|
||||||
if ctx and ctx.local_stack:
|
if ctx and ctx.local_stack:
|
||||||
# TODO: This code probably doesn't work
|
# TODO: This code probably doesn't work
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
print(
|
||||||
|
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
||||||
|
f"{dev_root_path}"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(
|
||||||
|
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
||||||
|
)
|
||||||
return dev_root_path
|
return dev_root_path
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -102,7 +109,9 @@ def get_plugin_code_paths(stack) -> List[Path]:
|
||||||
if type(pod) is str:
|
if type(pod) is str:
|
||||||
result.add(get_stack_path(stack))
|
result.add(get_stack_path(stack))
|
||||||
else:
|
else:
|
||||||
pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
|
pod_root_dir = os.path.join(
|
||||||
|
get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"]
|
||||||
|
)
|
||||||
result.add(Path(os.path.join(pod_root_dir, "stack")))
|
result.add(Path(os.path.join(pod_root_dir, "stack")))
|
||||||
return list(result)
|
return list(result)
|
||||||
|
|
||||||
|
|
@ -157,7 +166,11 @@ def get_pod_file_path(stack, parsed_stack, pod_name: str):
|
||||||
else:
|
else:
|
||||||
for pod in pods:
|
for pod in pods:
|
||||||
if pod["name"] == pod_name:
|
if pod["name"] == pod_name:
|
||||||
pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
|
pod_root_dir = os.path.join(
|
||||||
|
get_dev_root_path(None),
|
||||||
|
pod["repository"].split("/")[-1],
|
||||||
|
pod["path"],
|
||||||
|
)
|
||||||
result = os.path.join(pod_root_dir, "docker-compose.yml")
|
result = os.path.join(pod_root_dir, "docker-compose.yml")
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
@ -180,7 +193,11 @@ def get_pod_script_paths(parsed_stack, pod_name: str):
|
||||||
if not type(pods[0]) is str:
|
if not type(pods[0]) is str:
|
||||||
for pod in pods:
|
for pod in pods:
|
||||||
if pod["name"] == pod_name:
|
if pod["name"] == pod_name:
|
||||||
pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
|
pod_root_dir = os.path.join(
|
||||||
|
get_dev_root_path(None),
|
||||||
|
pod["repository"].split("/")[-1],
|
||||||
|
pod["path"],
|
||||||
|
)
|
||||||
if "pre_start_command" in pod:
|
if "pre_start_command" in pod:
|
||||||
result.append(os.path.join(pod_root_dir, pod["pre_start_command"]))
|
result.append(os.path.join(pod_root_dir, pod["pre_start_command"]))
|
||||||
if "post_start_command" in pod:
|
if "post_start_command" in pod:
|
||||||
|
|
@ -201,7 +218,8 @@ def pod_has_scripts(parsed_stack, pod_name: str):
|
||||||
|
|
||||||
def get_internal_compose_file_dir():
|
def get_internal_compose_file_dir():
|
||||||
# TODO: refactor to use common code with deploy command
|
# TODO: refactor to use common code with deploy command
|
||||||
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
# See:
|
||||||
|
# https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
||||||
data_dir = Path(__file__).absolute().parent.joinpath("data")
|
data_dir = Path(__file__).absolute().parent.joinpath("data")
|
||||||
source_compose_dir = data_dir.joinpath("compose")
|
source_compose_dir = data_dir.joinpath("compose")
|
||||||
return source_compose_dir
|
return source_compose_dir
|
||||||
|
|
|
||||||
|
|
@ -20,10 +20,11 @@ from importlib import resources, metadata
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx):
|
def command(ctx):
|
||||||
'''print tool version'''
|
"""print tool version"""
|
||||||
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
|
|
||||||
if resources.is_resource(data, "build_tag.txt"):
|
if resources.is_resource(data, "build_tag.txt"):
|
||||||
with resources.open_text(data, "build_tag.txt") as version_file:
|
with resources.open_text(data, "build_tag.txt") as version_file:
|
||||||
# TODO: code better version that skips comment lines
|
# TODO: code better version that skips comment lines
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue