Compare commits

...

5 Commits

Author SHA1 Message Date
David Boreham cbd7aa2393 Implement down command 2023-06-20 06:03:00 -06:00
David Boreham e320f8dc64 Additional refactoring 2023-06-19 20:54:18 -06:00
David Boreham 08cc9868b9 Merge branch 'main' into dboreham/deployments 2023-06-16 07:48:47 -06:00
David Boreham 75da296222 Add new subcommands 2023-06-03 17:28:56 -06:00
David Boreham 97367fcf15 Initial commit 2023-05-29 18:44:56 -06:00
12 changed files with 429 additions and 38 deletions

View File

@@ -15,7 +15,7 @@
 import os
 from abc import ABC, abstractmethod
-from .deploy_system import get_stack_status
+from .deploy import get_stack_status

 def get_stack(config, stack):

View File

@@ -0,0 +1,30 @@
services:
laconicd:
restart: unless-stopped
image: cerc/laconicd:local
command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
volumes:
# The cosmos-sdk node's database directory:
- laconicd-data:/root/.laconicd/data
# TODO: look at folding these scripts into the container
- ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
- ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
- ../config/fixturenet-laconicd/export-myaddress.sh:/docker-entrypoint-scripts.d/export-myaddress.sh
# TODO: determine which of the ports below is really needed
ports:
- "6060"
- "26657"
- "26656"
- "9473:9473"
- "8545"
- "8546"
- "9090"
- "9091"
- "1317"
cli:
image: cerc/laconic-registry-cli:local
volumes:
- ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml
volumes:
laconicd-data:
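
For context, a pod file like this is driven through python-on-whales, the same way deploy.py (further down) does it. A minimal sketch, assuming the file is saved as docker-compose-fixturenet-laconicd.yml and using an arbitrary project name:

# Sketch only; the compose file name and project name are assumptions.
from python_on_whales import DockerClient

docker = DockerClient(compose_files=["docker-compose-fixturenet-laconicd.yml"],
                      compose_project_name="laconic-fixturenet")
docker.compose.up(detach=True)     # start the laconicd and cli services
docker.compose.down(volumes=True)  # tear down and remove the laconicd-data volume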

View File

@@ -5,7 +5,6 @@ go-ethereum-foundry
 ipld-eth-beacon-db
 ipld-eth-beacon-indexer
 ipld-eth-server
-lighthouse
 laconicd
 fixturenet-laconicd
 fixturenet-eth

View File

@@ -0,0 +1,2 @@
# Laconic Mainnet Deployment (experimental)

View File

@@ -0,0 +1,57 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
import os
from shutil import copyfile
import sys
from .util import get_stack_config_filename, get_parsed_deployment_spec
default_spec_file_content = """stack: mainnet-laconic
data_dir: /my/path
node_name: my-node-name
"""
def make_default_deployment_dir():
return "deployment-001"
@click.command()
@click.option("--output", required=True, help="Write yaml spec file here")
@click.pass_context
def init(ctx, output):
with open(output, "w") as output_file:
output_file.write(default_spec_file_content)
@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")
@click.pass_context
def create(ctx, spec_file, deployment_dir):
# This function fails with a useful error message if the file doesn't exist
parsed_spec = get_parsed_deployment_spec(spec_file)
if ctx.debug:
print(f"parsed spec: {parsed_spec}")
if deployment_dir is None:
deployment_dir = make_default_deployment_dir()
if os.path.exists(deployment_dir):
print(f"Error: {deployment_dir} already exists")
sys.exit(1)
os.mkdir(deployment_dir)
# Copy spec file and the stack file into the deployment dir
copyfile(spec_file, os.path.join(deployment_dir, os.path.basename(spec_file)))
stack_file = get_stack_config_filename(parsed_spec.stack)
copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))
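
One way to exercise this init/create pair is click's built-in test runner. A sketch, assuming the module is importable as app.deployment_create:

# Sketch, not part of the diff; the import path is an assumption.
from click.testing import CliRunner
from app.deployment_create import init

runner = CliRunner()
with runner.isolated_filesystem():
    result = runner.invoke(init, ["--output", "spec.yml"])
    assert result.exit_code == 0
    # spec.yml now holds the default stack/data_dir/node_name template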

View File

@@ -0,0 +1,31 @@
version: "1.0"
name: mainnet-laconic
description: "Mainnet laconic node"
repos:
- cerc-io/laconicd
- lirewine/debug
- lirewine/crypto
- lirewine/gem
- lirewine/sdk
- cerc-io/laconic-sdk
- cerc-io/laconic-registry-cli
- cerc-io/laconic-console
npms:
- laconic-sdk
- laconic-registry-cli
- debug
- crypto
- sdk
- gem
- laconic-console
containers:
- cerc/laconicd
- cerc/laconic-registry-cli
- cerc/laconic-console-host
pods:
- mainnet-laconicd
- fixturenet-laconic-console
config:
cli:
key: laconicd.mykey
address: laconicd.myaddress
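
The deployment code below turns the pods list of a stack file like this into compose file names. A sketch of that convention, assuming the file is saved locally as stack.yml:

# Sketch of the pod -> compose-file naming used by deployment_create.py below.
import yaml

with open("stack.yml") as stack_file:
    stack = yaml.safe_load(stack_file)
for pod in stack["pods"]:
    # yields docker-compose-mainnet-laconicd.yml and
    # docker-compose-fixturenet-laconic-console.yml for this stack
    print(f"docker-compose-{pod}.yml")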

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Dump environment variables for debugging
echo "Environment variables:"
env
# Test laconic stack
echo "Running laconic stack test"
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
rm -rf $CERC_REPO_BASE_DIR
mkdir -p $CERC_REPO_BASE_DIR
# Test bringing the test container up and down
# with and without volume removal
$TEST_TARGET_SO --stack test setup-repositories
$TEST_TARGET_SO --stack test build-containers
$TEST_TARGET_SO --stack test deploy up
$TEST_TARGET_SO --stack test deploy down
# The next time we bring the container up the volume will be old (from the previous run above)
$TEST_TARGET_SO --stack test deploy up
log_output_1=$( $TEST_TARGET_SO --stack test deploy logs )
if [[ "$log_output_1" == *"Filesystem is old"* ]]; then
echo "Retain volumes test: passed"
else
echo "Retain volumes test: FAILED"
exit 1
fi
$TEST_TARGET_SO --stack test deploy down --delete-volumes
# Now when we bring the container up the volume will be new again
$TEST_TARGET_SO --stack test deploy up
log_output_2=$( $TEST_TARGET_SO --stack test deploy logs )
if [[ "$log_output_2" == *"Filesystem is fresh"* ]]; then
echo "Delete volumes test: passed"
else
echo "Delete volumes test: FAILED"
exit 1
fi
$TEST_TARGET_SO --stack test deploy down --delete-volumes
echo "Test passed"

View File

@@ -21,12 +21,15 @@ import os
 import sys
 from dataclasses import dataclass
 from decouple import config
+from importlib import resources
 import subprocess
 from python_on_whales import DockerClient, DockerException
 import click
-import importlib.resources
 from pathlib import Path
-from .util import include_exclude_check, get_parsed_stack_config
+from .util import include_exclude_check, get_parsed_stack_config, global_options2
+from .deployment_create import create as deployment_create
+from .deployment_create import init as deployment_init

 class DeployCommandContext(object):
     def __init__(self, cluster_context, docker):
@@ -43,44 +46,40 @@ class DeployCommandContext(object):
 def command(ctx, include, exclude, env_file, cluster):
     '''deploy a stack'''
-    cluster_context = _make_cluster_context(ctx.obj, include, exclude, cluster, env_file)
-    # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
-    docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
-                          compose_env_file=cluster_context.env_file)
-    ctx.obj = DeployCommandContext(cluster_context, docker)
+    if ctx.parent.obj.debug:
+        print(f"ctx.parent.obj: {ctx.parent.obj}")
+    ctx.obj = create_deploy_context(global_options2(ctx), global_options2(ctx).stack, include, exclude, cluster, env_file)
     # Subcommand is executed now, by the magic of click

-@command.command()
-@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
-@click.pass_context
-def up(ctx, extra_args):
+def create_deploy_context(global_context, stack, include, exclude, cluster, env_file):
+    cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
+    # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
+    docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
+                          compose_env_file=cluster_context.env_file)
+    return DeployCommandContext(cluster_context, docker)

+def up_operation(ctx, services_list):
     global_context = ctx.parent.parent.obj
-    extra_args_list = list(extra_args) or None
+    deploy_context = ctx.obj
     if not global_context.dry_run:
-        cluster_context = ctx.obj.cluster_context
+        cluster_context = deploy_context.cluster_context
         container_exec_env = _make_runtime_env(global_context)
         for attr, value in container_exec_env.items():
             os.environ[attr] = value
         if global_context.verbose:
-            print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {extra_args_list}")
+            print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
         for pre_start_command in cluster_context.pre_start_commands:
             _run_command(global_context, cluster_context.cluster, pre_start_command)
-        ctx.obj.docker.compose.up(detach=True, services=extra_args_list)
+        deploy_context.docker.compose.up(detach=True, services=services_list)
         for post_start_command in cluster_context.post_start_commands:
             _run_command(global_context, cluster_context.cluster, post_start_command)
-        _orchestrate_cluster_config(global_context, cluster_context.config, ctx.obj.docker, container_exec_env)
+        _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.docker, container_exec_env)

-@command.command()
-@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
-@click.argument('extra_args', nargs=-1) # help: command: down<service1> <service2>
-@click.pass_context
-def down(ctx, delete_volumes, extra_args):
+def down_operation(ctx, delete_volumes, extra_args_list):
     global_context = ctx.parent.parent.obj
-    extra_args_list = list(extra_args) or None
     if not global_context.dry_run:
         if global_context.verbose:
             print("Running compose down")
@@ -91,6 +90,23 @@ def down(ctx, delete_volumes, extra_args):
         ctx.obj.docker.compose.down(timeout=timeout_arg, volumes=delete_volumes)

+@command.command()
+@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
+@click.pass_context
+def up(ctx, extra_args):
+    extra_args_list = list(extra_args) or None
+    up_operation(ctx, extra_args_list)
+
+@command.command()
+@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
+@click.argument('extra_args', nargs=-1) # help: command: down <service1> <service2>
+@click.pass_context
+def down(ctx, delete_volumes, extra_args):
+    extra_args_list = list(extra_args) or None
+    down_operation(ctx, delete_volumes, extra_args_list)
+
 @command.command()
 @click.pass_context
 def ps(ctx):
@@ -175,7 +191,7 @@ def get_stack_status(ctx, stack):
     ctx_copy = copy.copy(ctx)
     ctx_copy.stack = stack
-    cluster_context = _make_cluster_context(ctx_copy, None, None, None, None)
+    cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None)
     docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
     # TODO: refactor to avoid duplicating this code above
     if ctx.verbose:
@@ -200,7 +216,8 @@ def _make_runtime_env(ctx):
     return container_exec_env

-def _make_cluster_context(ctx, include, exclude, cluster, env_file):
+# stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack
+def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
     if ctx.local_stack:
         dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
@@ -208,14 +225,20 @@ def _make_cluster_context(ctx, include, exclude, cluster, env_file):
     else:
         dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))

-    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
-    compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")
+    # TODO: huge hack, fix this
+    # If the caller passed a path for the stack file, then we know that we can get the compose files
+    # from the same directory
+    if isinstance(stack, os.PathLike):
+        compose_dir = stack.parent
+    else:
+        # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+        compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")

     if cluster is None:
         # Create default unique, stable cluster name from config file path and stack name if provided
         # TODO: change this to the config file path
         path = os.path.realpath(sys.argv[0])
-        unique_cluster_descriptor = f"{path},{ctx.stack},{include},{exclude}"
+        unique_cluster_descriptor = f"{path},{stack},{include},{exclude}"
         if ctx.debug:
             print(f"pre-hash descriptor: {unique_cluster_descriptor}")
         hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
@@ -225,12 +248,12 @@ def _make_cluster_context(ctx, include, exclude, cluster, env_file):
     # See: https://stackoverflow.com/a/20885799/1701505
     from . import data
-    with importlib.resources.open_text(data, "pod-list.txt") as pod_list_file:
+    with resources.open_text(data, "pod-list.txt") as pod_list_file:
         all_pods = pod_list_file.read().splitlines()

     pods_in_scope = []
-    if ctx.stack:
-        stack_config = get_parsed_stack_config(ctx.stack)
+    if stack:
+        stack_config = get_parsed_stack_config(stack)
         # TODO: syntax check the input here
         pods_in_scope = stack_config['pods']
         cluster_config = stack_config['config'] if 'config' in stack_config else None
@@ -376,3 +399,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
             waiting_for_data = False
         if ctx.debug:
             print(f"destination output: {destination_output}")
+
+command.add_command(deployment_init)
+command.add_command(deployment_create)
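
The net effect of this refactor, sketched below: create_deploy_context can now be driven either with a stack name or with a path to a stack file. SimpleNamespace stands in for the CLI's parsed global options, and the deployment path is an assumption:

# Sketch only; attribute names on the stand-in context are assumptions.
from pathlib import Path
from types import SimpleNamespace
from app.deploy import create_deploy_context

global_context = SimpleNamespace(local_stack=False, debug=False, verbose=False, dry_run=False)
# By stack name: compose files are resolved under the built-in data/compose dir.
by_name = create_deploy_context(global_context, "test", None, None, None, None)
# By stack file path: compose files come from the stack file's own directory.
by_path = create_deploy_context(global_context, Path("deployment-001/stack.yml"), None, None, None, None)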

app/deployment.py (new file, mode 100644, 98 lines added)
View File

@@ -0,0 +1,98 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
from dataclasses import dataclass
from pathlib import Path
import sys
from .deploy import up_operation, create_deploy_context
from .util import global_options
@dataclass
class DeploymentContext:
dir: Path
@click.group()
@click.option("--dir", required=True, help="path to deployment directory")
@click.pass_context
def command(ctx, dir):
# Check that --stack wasn't supplied
if ctx.parent.obj.stack:
print("Error: --stack can't be supplied with the deployment command")
sys.exit(1)
# Check dir is valid
dir_path = Path(dir)
if not dir_path.exists():
print(f"Error: deployment directory {dir} does not exist")
sys.exit(1)
if not dir_path.is_dir():
print(f"Error: supplied deployment directory path {dir} exists but is a file not a directory")
sys.exit(1)
# Store the deployment context for subcommands
ctx.obj = DeploymentContext(dir_path)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
@click.pass_context
def up(ctx, extra_args):
print(f"Context: {global_options(ctx)}")
# Get the stack config file name
stack_file_path = ctx.obj.dir.joinpath("stack.yml")
# TODO: add cluster name and env file here
ctx.obj = create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, None, None)
services_list = list(extra_args) or None
up_operation(ctx, services_list)
@command.command()
@click.pass_context
def down(ctx):
print(f"Context: {ctx.parent.obj}")
@command.command()
@click.pass_context
def ps(ctx):
print(f"Context: {ctx.parent.obj}")
@command.command()
@click.pass_context
def logs(ctx):
print(f"Context: {ctx.parent.obj}")
@command.command()
@click.pass_context
def task(ctx):
print(f"Context: {ctx.parent.obj}")
@command.command()
@click.pass_context
def status(ctx):
print(f"Context: {ctx.parent.obj}")
#from importlib import resources, util
# TODO: figure out how to do this dynamically
#stack = "mainnet-laconic"
#module_name = "commands"
#spec = util.spec_from_file_location(module_name, "./app/data/stacks/" + stack + "/deploy/commands.py")
#imported_stack = util.module_from_spec(spec)
#spec.loader.exec_module(imported_stack)
#command.add_command(imported_stack.init)
#command.add_command(imported_stack.create)
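
A sketch of how the new group is reached from the top-level CLI (the directory name is an assumption, and the deployment dir must already exist):

# Sketch, not part of the diff: driving the deployment group through the
# top-level cli defined in cli.py below.
from click.testing import CliRunner
from cli import cli

runner = CliRunner()
result = runner.invoke(cli, ["deployment", "--dir", "deployment-001", "ps"])
print(result.output)  # the stub subcommands above just print their context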

View File

@@ -0,0 +1,67 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
import os
from pathlib import Path
from shutil import copyfile
import sys
from .util import get_stack_config_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options
default_spec_file_content = """stack: mainnet-laconic
data_dir: /my/path
node_name: my-node-name
"""
def make_default_deployment_dir():
return "deployment-001"
@click.command()
@click.option("--output", required=True, help="Write yaml spec file here")
@click.pass_context
def init(ctx, output):
with open(output, "w") as output_file:
output_file.write(default_spec_file_content)
@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")
@click.pass_context
def create(ctx, spec_file, deployment_dir):
# This function fails with a useful error message if the file doesn't exist
parsed_spec = get_parsed_deployment_spec(spec_file)
stack_file = get_stack_config_path(parsed_spec['stack'])
parsed_stack = get_parsed_stack_config(stack_file)
if global_options(ctx).debug:
print(f"parsed spec: {parsed_spec}")
if deployment_dir is None:
deployment_dir = make_default_deployment_dir()
if os.path.exists(deployment_dir):
print(f"Error: {deployment_dir} already exists")
sys.exit(1)
os.mkdir(deployment_dir)
# Copy spec file and the stack file into the deployment dir
copyfile(spec_file, os.path.join(deployment_dir, os.path.basename(spec_file)))
copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))
# Copy the pod files into the deployment dir
pods = parsed_stack['pods']
# TODO: refactor to use common code with deploy command
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")
for pod in pods:
pod_file_path = os.path.join(compose_dir, f"docker-compose-{pod}.yml")
copyfile(pod_file_path, os.path.join(deployment_dir, os.path.basename(pod_file_path)))
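
Putting init and create together, the resulting deployment directory for the mainnet-laconic stack shown earlier should look roughly like this (a sketch; the spec file name is whatever was passed to --output, and the compose files follow from the stack's pods list):

deployment-001/
├── spec.yml                                        # copied spec file
├── stack.yml                                       # copied stack config
├── docker-compose-mainnet-laconicd.yml             # one compose file per pod
└── docker-compose-fixturenet-laconic-console.yml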

View File

@@ -30,10 +30,16 @@ def include_exclude_check(s, include, exclude):
     return s not in exclude_list

-def get_parsed_stack_config(stack):
+def get_stack_config_path(stack):
     # In order to be compatible with Python 3.8 we need to use this hack to get the path:
     # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
     stack_file_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack, "stack.yml")
+    return stack_file_path
+
+# Caller can pass either the name of a stack, or a path to a stack file
+def get_parsed_stack_config(stack):
+    stack_file_path = stack if isinstance(stack, os.PathLike) else get_stack_config_path(stack)
     try:
         with stack_file_path:
             stack_config = yaml.safe_load(open(stack_file_path, "r"))
@@ -48,3 +54,27 @@ def get_parsed_stack_config(stack):
         print(f"Error: stack: {stack} does not exist")
         print(f"Exiting, error: {error}")
         sys.exit(1)
+
+def get_parsed_deployment_spec(spec_file):
+    spec_file_path = Path(spec_file)
+    try:
+        with spec_file_path:
+            deploy_spec = yaml.safe_load(open(spec_file_path, "r"))
+            return deploy_spec
+    except FileNotFoundError as error:
+        # We try here to generate a useful diagnostic error
+        print(f"Error: spec file: {spec_file_path} does not exist")
+        print(f"Exiting, error: {error}")
+        sys.exit(1)
+
+# TODO: this is fragile wrt the subcommand depth
+# See also: https://github.com/pallets/click/issues/108
+def global_options(ctx):
+    return ctx.parent.parent.obj
+
+# TODO: hack
+def global_options2(ctx):
+    return ctx.parent.obj
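
Why two helpers: the distance from a command's context to the root context object depends on how deep the invoked command sits. A sketch of the chains involved:

# Sketch of the context chains behind global_options/global_options2:
#   laconic-so deploy up  ->  ctx(up) -> ctx(deploy) -> ctx(cli)
#     global_options(ctx_of_up)       == ctx.parent.parent.obj  # cli's obj
#   laconic-so deploy     ->  ctx(deploy) -> ctx(cli)
#     global_options2(ctx_of_deploy)  == ctx.parent.obj         # cli's obj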

cli.py (8 changed lines)
View File

@@ -19,8 +19,9 @@ from dataclasses import dataclass
 from app import setup_repositories
 from app import build_containers
 from app import build_npms
-from app import deploy_system
+from app import deploy
 from app import version
+from app import deployment

 CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])

@@ -54,6 +55,7 @@ def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_err
 cli.add_command(setup_repositories.command, "setup-repositories")
 cli.add_command(build_containers.command, "build-containers")
 cli.add_command(build_npms.command, "build-npms")
-cli.add_command(deploy_system.command, "deploy") # deploy is an alias for deploy-system
-cli.add_command(deploy_system.command, "deploy-system")
+cli.add_command(deploy.command, "deploy") # deploy is an alias for deploy-system
+cli.add_command(deploy.command, "deploy-system")
+cli.add_command(deployment.command, "deployment")
 cli.add_command(version.command, "version")