Compare commits
3 Commits
dboreham/d ... main

| Author | SHA1 | Date |
|---|---|---|
|  | ce966f1baa |  |
|  | db4728a9e3 |  |
|  | 7ca7bcc952 |  |
@@ -1,5 +1,7 @@
 # Stack Orchestrator

 Stack Orchestrator allows building and deployment of a Laconic Stack on a single machine with minimal prerequisites. It is a Python3 CLI tool that runs on any OS with Python3 and Docker. The following diagram summarizes the relevant repositories in the Laconic Stack - and the relationship to Stack Orchestrator.
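The README text above describes the CLI whose commands this compare touches. A minimal usage sketch, assuming the released `laconic-so` binary is on the PATH and using the `test` stack exercised by the test script further down in this compare:

```bash
# Fetch the repositories named in the stack definition
laconic-so --stack test setup-repositories

# Build the stack's container images
laconic-so --stack test build-containers

# Bring the stack up, then down again
laconic-so --stack test deploy up
laconic-so --stack test deploy down
```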
@@ -15,7 +15,7 @@
 import os
 from abc import ABC, abstractmethod
-from .deploy import get_stack_status
+from .deploy_system import get_stack_status


 def get_stack(config, stack):
@@ -1,30 +0,0 @@
-services:
-  laconicd:
-    restart: unless-stopped
-    image: cerc/laconicd:local
-    command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
-    volumes:
-      # The cosmos-sdk node's database directory:
-      - laconicd-data:/root/.laconicd/data
-      # TODO: look at folding these scripts into the container
-      - ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
-      - ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
-      - ../config/fixturenet-laconicd/export-myaddress.sh:/docker-entrypoint-scripts.d/export-myaddress.sh
-    # TODO: determine which of the ports below is really needed
-    ports:
-      - "6060"
-      - "26657"
-      - "26656"
-      - "9473:9473"
-      - "8545"
-      - "8546"
-      - "9090"
-      - "9091"
-      - "1317"
-  cli:
-    image: cerc/laconic-registry-cli:local
-    volumes:
-      - ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml
-
-volumes:
-  laconicd-data:
@@ -5,6 +5,7 @@ go-ethereum-foundry
 ipld-eth-beacon-db
 ipld-eth-beacon-indexer
 ipld-eth-server
+lighthouse
 laconicd
 fixturenet-laconicd
 fixturenet-eth
@@ -1,2 +0,0 @@
-# Laconic Mainnet Deployment (experimental)
-
@@ -1,57 +0,0 @@
-# Copyright © 2022, 2023 Cerc
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import click
-import os
-from shutil import copyfile
-import sys
-from .util import get_stack_config_filename, get_parsed_deployment_spec
-
-default_spec_file_content = """stack: mainnet-laconic
-data_dir: /my/path
-node_name: my-node-name
-"""
-
-
-def make_default_deployment_dir():
-    return "deployment-001"
-
-
-@click.command()
-@click.option("--output", required=True, help="Write yaml spec file here")
-@click.pass_context
-def init(ctx, output):
-    with open(output, "w") as output_file:
-        output_file.write(default_spec_file_content)
-
-
-@click.command()
-@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
-@click.option("--deployment-dir", help="Create deployment files in this directory")
-@click.pass_context
-def create(ctx, spec_file, deployment_dir):
-    # This function fails with a useful error message if the file doesn't exist
-    parsed_spec = get_parsed_deployment_spec(spec_file)
-    if ctx.debug:
-        print(f"parsed spec: {parsed_spec}")
-    if deployment_dir is None:
-        deployment_dir = make_default_deployment_dir()
-    if os.path.exists(deployment_dir):
-        print(f"Error: {deployment_dir} already exists")
-        sys.exit(1)
-    os.mkdir(deployment_dir)
-    # Copy spec file and the stack file into the deployment dir
-    copyfile(spec_file, os.path.join(deployment_dir, os.path.basename(spec_file)))
-    stack_file = get_stack_config_filename(parsed_spec.stack)
-    copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))
@@ -1,31 +0,0 @@
-version: "1.0"
-name: mainnet-laconic
-description: "Mainnet laconic node"
-repos:
-  - cerc-io/laconicd
-  - lirewine/debug
-  - lirewine/crypto
-  - lirewine/gem
-  - lirewine/sdk
-  - cerc-io/laconic-sdk
-  - cerc-io/laconic-registry-cli
-  - cerc-io/laconic-console
-npms:
-  - laconic-sdk
-  - laconic-registry-cli
-  - debug
-  - crypto
-  - sdk
-  - gem
-  - laconic-console
-containers:
-  - cerc/laconicd
-  - cerc/laconic-registry-cli
-  - cerc/laconic-console-host
-pods:
-  - mainnet-laconicd
-  - fixturenet-laconic-console
-config:
-  cli:
-    key: laconicd.mykey
-    address: laconicd.myaddress
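Each entry under `pods:` in a stack file such as the one above corresponds to a compose file named `docker-compose-<pod>.yml` under `app/data/compose` (the naming is visible in the `deployment_create` and `_make_cluster_context` code further down in this compare). A small sketch, assuming that repository layout:

```bash
# List the compose files backing the pods declared by the mainnet-laconic stack
for pod in mainnet-laconicd fixturenet-laconic-console; do
  ls app/data/compose/docker-compose-${pod}.yml
done
```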
@@ -1,48 +0,0 @@
-#!/usr/bin/env bash
-set -e
-if [ -n "$CERC_SCRIPT_DEBUG" ]; then
-    set -x
-fi
-# Dump environment variables for debugging
-echo "Environment variables:"
-env
-# Test laconic stack
-echo "Running laconic stack test"
-# Bit of a hack, test the most recent package
-TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
-# Set a non-default repo dir
-export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
-echo "Testing this package: $TEST_TARGET_SO"
-echo "Test version command"
-reported_version_string=$( $TEST_TARGET_SO version )
-echo "Version reported is: ${reported_version_string}"
-echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
-rm -rf $CERC_REPO_BASE_DIR
-mkdir -p $CERC_REPO_BASE_DIR
-# Test bringing the test container up and down
-# with and without volume removal
-$TEST_TARGET_SO --stack test setup-repositories
-$TEST_TARGET_SO --stack test build-containers
-$TEST_TARGET_SO --stack test deploy up
-$TEST_TARGET_SO --stack test deploy down
-# The next time we bring the container up the volume will be old (from the previous run above)
-$TEST_TARGET_SO --stack test deploy up
-log_output_1=$( $TEST_TARGET_SO --stack test deploy logs )
-if [[ "$log_output_1" == *"Filesystem is old"* ]]; then
-    echo "Retain volumes test: passed"
-else
-    echo "Retain volumes test: FAILED"
-    exit 1
-fi
-$TEST_TARGET_SO --stack test deploy down --delete-volumes
-# Now when we bring the container up the volume will be new again
-$TEST_TARGET_SO --stack test deploy up
-log_output_2=$( $TEST_TARGET_SO --stack test deploy logs )
-if [[ "$log_output_2" == *"Filesystem is fresh"* ]]; then
-    echo "Delete volumes test: passed"
-else
-    echo "Delete volumes test: FAILED"
-    exit 1
-fi
-$TEST_TARGET_SO --stack test deploy down --delete-volumes
-echo "Test passed"
@@ -21,15 +21,12 @@ import os
 import sys
 from dataclasses import dataclass
 from decouple import config
-from importlib import resources
 import subprocess
 from python_on_whales import DockerClient, DockerException
 import click
+import importlib.resources
 from pathlib import Path
-from .util import include_exclude_check, get_parsed_stack_config, global_options2
-from .deployment_create import create as deployment_create
-from .deployment_create import init as deployment_init
+from .util import include_exclude_check, get_parsed_stack_config


 class DeployCommandContext(object):
     def __init__(self, cluster_context, docker):
@@ -46,40 +43,44 @@ class DeployCommandContext(object):
 def command(ctx, include, exclude, env_file, cluster):
     '''deploy a stack'''

-    if ctx.parent.obj.debug:
-        print(f"ctx.parent.obj: {ctx.parent.obj}")
-    ctx.obj = create_deploy_context(global_options2(ctx), global_options2(ctx).stack, include, exclude, cluster, env_file)
-    # Subcommand is executed now, by the magic of click
+    cluster_context = _make_cluster_context(ctx.obj, include, exclude, cluster, env_file)


-def create_deploy_context(global_context, stack, include, exclude, cluster, env_file):
-    cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
     # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
     docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
                           compose_env_file=cluster_context.env_file)
-    return DeployCommandContext(cluster_context, docker)
+    ctx.obj = DeployCommandContext(cluster_context, docker)
+    # Subcommand is executed now, by the magic of click


-def up_operation(ctx, services_list):
+@command.command()
+@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
+@click.pass_context
+def up(ctx, extra_args):
     global_context = ctx.parent.parent.obj
-    deploy_context = ctx.obj
+    extra_args_list = list(extra_args) or None
     if not global_context.dry_run:
-        cluster_context = deploy_context.cluster_context
+        cluster_context = ctx.obj.cluster_context
         container_exec_env = _make_runtime_env(global_context)
         for attr, value in container_exec_env.items():
             os.environ[attr] = value
         if global_context.verbose:
-            print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
+            print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {extra_args_list}")
         for pre_start_command in cluster_context.pre_start_commands:
             _run_command(global_context, cluster_context.cluster, pre_start_command)
-        deploy_context.docker.compose.up(detach=True, services=services_list)
+        ctx.obj.docker.compose.up(detach=True, services=extra_args_list)
         for post_start_command in cluster_context.post_start_commands:
             _run_command(global_context, cluster_context.cluster, post_start_command)
-        _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.docker, container_exec_env)
+        _orchestrate_cluster_config(global_context, cluster_context.config, ctx.obj.docker, container_exec_env)


-def down_operation(ctx, delete_volumes, extra_args_list):
+@command.command()
+@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
+@click.argument('extra_args', nargs=-1) # help: command: down<service1> <service2>
+@click.pass_context
+def down(ctx, delete_volumes, extra_args):
     global_context = ctx.parent.parent.obj
+    extra_args_list = list(extra_args) or None
     if not global_context.dry_run:
         if global_context.verbose:
             print("Running compose down")
@@ -90,23 +91,6 @@ def down_operation(ctx, delete_volumes, extra_args_list):
         ctx.obj.docker.compose.down(timeout=timeout_arg, volumes=delete_volumes)


-@command.command()
-@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
-@click.pass_context
-def up(ctx, extra_args):
-    extra_args_list = list(extra_args) or None
-    up_operation(ctx, extra_args_list)
-
-
-@command.command()
-@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
-@click.argument('extra_args', nargs=-1) # help: command: down<service1> <service2>
-@click.pass_context
-def down(ctx, delete_volumes, extra_args):
-    extra_args_list = list(extra_args) or None
-    down_operation(ctx, delete_volumes, extra_args_list)
-
-
 @command.command()
 @click.pass_context
 def ps(ctx):
@@ -191,7 +175,7 @@ def get_stack_status(ctx, stack):
     ctx_copy = copy.copy(ctx)
     ctx_copy.stack = stack

-    cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None)
+    cluster_context = _make_cluster_context(ctx_copy, None, None, None, None)
     docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
     # TODO: refactor to avoid duplicating this code above
     if ctx.verbose:
@@ -216,8 +200,7 @@ def _make_runtime_env(ctx):
     return container_exec_env


-# stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack
-def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
+def _make_cluster_context(ctx, include, exclude, cluster, env_file):

     if ctx.local_stack:
         dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
@@ -225,20 +208,14 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
     else:
         dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))

-    # TODO: huge hack, fix this
-    # If the caller passed a path for the stack file, then we know that we can get the compose files
-    # from the same directory
-    if isinstance(stack, os.PathLike):
-        compose_dir = stack.parent
-    else:
-        # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
-        compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")
+    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+    compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")

     if cluster is None:
         # Create default unique, stable cluster name from config file path and stack name if provided
         # TODO: change this to the config file path
         path = os.path.realpath(sys.argv[0])
-        unique_cluster_descriptor = f"{path},{stack},{include},{exclude}"
+        unique_cluster_descriptor = f"{path},{ctx.stack},{include},{exclude}"
         if ctx.debug:
             print(f"pre-hash descriptor: {unique_cluster_descriptor}")
         hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
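As the hunk above shows, `_make_cluster_context` derives the default cluster name by MD5-hashing a descriptor built from the CLI path, the stack, and the include/exclude filters. A shell sketch of the same derivation (the descriptor values here are placeholders, not taken from the code):

```bash
# Rebuild the "pre-hash descriptor" and hash it the same way hashlib.md5().hexdigest() does
descriptor="/usr/local/bin/laconic-so,test,None,None"
hash=$(printf '%s' "$descriptor" | md5sum | cut -d' ' -f1)
echo "default cluster name is seeded from: ${hash}"
```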
@@ -248,12 +225,12 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):

     # See: https://stackoverflow.com/a/20885799/1701505
     from . import data
-    with resources.open_text(data, "pod-list.txt") as pod_list_file:
+    with importlib.resources.open_text(data, "pod-list.txt") as pod_list_file:
         all_pods = pod_list_file.read().splitlines()

     pods_in_scope = []
-    if stack:
-        stack_config = get_parsed_stack_config(stack)
+    if ctx.stack:
+        stack_config = get_parsed_stack_config(ctx.stack)
         # TODO: syntax check the input here
         pods_in_scope = stack_config['pods']
         cluster_config = stack_config['config'] if 'config' in stack_config else None
@@ -399,7 +376,3 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
                         waiting_for_data = False
                 if ctx.debug:
                     print(f"destination output: {destination_output}")
-
-
-command.add_command(deployment_init)
-command.add_command(deployment_create)
@@ -1,98 +0,0 @@
-# Copyright © 2022, 2023 Cerc
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import click
-from dataclasses import dataclass
-from pathlib import Path
-import sys
-from .deploy import up_operation, create_deploy_context
-from .util import global_options
-
-@dataclass
-class DeploymentContext:
-    dir: Path
-
-
-@click.group()
-@click.option("--dir", required=True, help="path to deployment directory")
-@click.pass_context
-def command(ctx, dir):
-    # Check that --stack wasn't supplied
-    if ctx.parent.obj.stack:
-        print("Error: --stack can't be supplied with the deployment command")
-        sys.exit(1)
-    # Check dir is valid
-    dir_path = Path(dir)
-    if not dir_path.exists():
-        print(f"Error: deployment directory {dir} does not exist")
-        sys.exit(1)
-    if not dir_path.is_dir():
-        print(f"Error: supplied deployment directory path {dir} exists but is a file not a directory")
-        sys.exit(1)
-    # Store the deployment context for subcommands
-    ctx.obj = DeploymentContext(dir_path)
-
-
-@command.command()
-@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
-@click.pass_context
-def up(ctx, extra_args):
-    print(f"Context: {global_options(ctx)}")
-    # Get the stack config file name
-    stack_file_path = ctx.obj.dir.joinpath("stack.yml")
-    # TODO: add cluster name and env file here
-    ctx.obj = create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, None, None)
-    services_list = list(extra_args) or None
-    up_operation(ctx, services_list)
-
-
-@command.command()
-@click.pass_context
-def down(ctx):
-    print(f"Context: {ctx.parent.obj}")
-
-
-@command.command()
-@click.pass_context
-def ps(ctx):
-    print(f"Context: {ctx.parent.obj}")
-
-
-@command.command()
-@click.pass_context
-def logs(ctx):
-    print(f"Context: {ctx.parent.obj}")
-
-
-@command.command()
-@click.pass_context
-def task(ctx):
-    print(f"Context: {ctx.parent.obj}")
-
-
-@command.command()
-@click.pass_context
-def status(ctx):
-    print(f"Context: {ctx.parent.obj}")
-
-
-#from importlib import resources, util
-# TODO: figure out how to do this dynamically
-#stack = "mainnet-laconic"
-#module_name = "commands"
-#spec = util.spec_from_file_location(module_name, "./app/data/stacks/" + stack + "/deploy/commands.py")
-#imported_stack = util.module_from_spec(spec)
-#spec.loader.exec_module(imported_stack)
-#command.add_command(imported_stack.init)
-#command.add_command(imported_stack.create)
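The module deleted above (and the deployment-create module deleted just below) wire a deployment workflow onto the click groups seen elsewhere in this compare: `init` writes a spec file, `create` copies the spec, stack, and pod compose files into a deployment directory, and the `deployment` group operates on that directory. A hypothetical invocation sketch, with command paths inferred from how the commands are registered on the click groups (not taken from documentation):

```bash
# Write a default deployment spec, then materialise a deployment directory from it
laconic-so deploy init --output spec.yml
laconic-so deploy create --spec-file spec.yml --deployment-dir deployment-001

# Operate on the deployment directory; note that --stack must not also be supplied here
laconic-so deployment --dir deployment-001 up
```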
@@ -1,67 +0,0 @@
-# Copyright © 2022, 2023 Cerc
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import click
-import os
-from pathlib import Path
-from shutil import copyfile
-import sys
-from .util import get_stack_config_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options
-
-default_spec_file_content = """stack: mainnet-laconic
-data_dir: /my/path
-node_name: my-node-name
-"""
-
-
-def make_default_deployment_dir():
-    return "deployment-001"
-
-
-@click.command()
-@click.option("--output", required=True, help="Write yaml spec file here")
-@click.pass_context
-def init(ctx, output):
-    with open(output, "w") as output_file:
-        output_file.write(default_spec_file_content)
-
-
-@click.command()
-@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
-@click.option("--deployment-dir", help="Create deployment files in this directory")
-@click.pass_context
-def create(ctx, spec_file, deployment_dir):
-    # This function fails with a useful error message if the file doesn't exist
-    parsed_spec = get_parsed_deployment_spec(spec_file)
-    stack_file = get_stack_config_path(parsed_spec['stack'])
-    parsed_stack = get_parsed_stack_config(stack_file)
-    if global_options(ctx).debug:
-        print(f"parsed spec: {parsed_spec}")
-    if deployment_dir is None:
-        deployment_dir = make_default_deployment_dir()
-    if os.path.exists(deployment_dir):
-        print(f"Error: {deployment_dir} already exists")
-        sys.exit(1)
-    os.mkdir(deployment_dir)
-    # Copy spec file and the stack file into the deployment dir
-    copyfile(spec_file, os.path.join(deployment_dir, os.path.basename(spec_file)))
-    copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))
-    # Copy the pod files into the deployment dir
-    pods = parsed_stack['pods']
-    # TODO: refactor to use common code with deploy command
-    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
-    compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")
-    for pod in pods:
-        pod_file_path = os.path.join(compose_dir, f"docker-compose-{pod}.yml")
-        copyfile(pod_file_path, os.path.join(deployment_dir, os.path.basename(pod_file_path)))
app/util.py — 32 changed lines

@@ -30,16 +30,10 @@ def include_exclude_check(s, include, exclude):
     return s not in exclude_list


-def get_stack_config_path(stack):
+def get_parsed_stack_config(stack):
     # In order to be compatible with Python 3.8 we need to use this hack to get the path:
     # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
     stack_file_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack, "stack.yml")
-    return stack_file_path
-
-
-# Caller can pass either the name of a stack, or a path to a stack file
-def get_parsed_stack_config(stack):
-    stack_file_path = stack if isinstance(stack, os.PathLike) else get_stack_config_path(stack)
     try:
         with stack_file_path:
             stack_config = yaml.safe_load(open(stack_file_path, "r"))

@@ -54,27 +48,3 @@ def get_parsed_stack_config(stack):
         print(f"Error: stack: {stack} does not exist")
         print(f"Exiting, error: {error}")
         sys.exit(1)
-
-
-def get_parsed_deployment_spec(spec_file):
-    spec_file_path = Path(spec_file)
-    try:
-        with spec_file_path:
-            deploy_spec = yaml.safe_load(open(spec_file_path, "r"))
-            return deploy_spec
-    except FileNotFoundError as error:
-        # We try here to generate a useful diagnostic error
-        print(f"Error: spec file: {spec_file_path} does not exist")
-        print(f"Exiting, error: {error}")
-        sys.exit(1)
-
-
-# TODO: this is fragile wrt to the subcommand depth
-# See also: https://github.com/pallets/click/issues/108
-def global_options(ctx):
-    return ctx.parent.parent.obj
-
-
-# TODO: hack
-def global_options2(ctx):
-    return ctx.parent.obj
cli.py — 8 changed lines

@@ -19,9 +19,8 @@ from dataclasses import dataclass
 from app import setup_repositories
 from app import build_containers
 from app import build_npms
-from app import deploy
+from app import deploy_system
 from app import version
-from app import deployment

 CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])

@@ -55,7 +54,6 @@ def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_err
 cli.add_command(setup_repositories.command, "setup-repositories")
 cli.add_command(build_containers.command, "build-containers")
 cli.add_command(build_npms.command, "build-npms")
-cli.add_command(deploy.command, "deploy")  # deploy is an alias for deploy-system
-cli.add_command(deploy.command, "deploy-system")
-cli.add_command(deployment.command, "deployment")
+cli.add_command(deploy_system.command, "deploy")  # deploy is an alias for deploy-system
+cli.add_command(deploy_system.command, "deploy-system")
 cli.add_command(version.command, "version")
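The cli.py hunks above register the top-level subcommands and define the global options (`-h`/`--help` via CONTEXT_SETTINGS, plus the flags implied by the `cli()` signature). A short usage sketch, with flag spellings inferred from the parameter names:

```bash
# Global help and version
laconic-so --help
laconic-so version

# Global flags go before the subcommand, e.g. a verbose dry run of a deploy
laconic-so --verbose --dry-run --stack test deploy up
```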
@@ -0,0 +1,44 @@
+#cloud-config
+
+# Used for easily testing stacks-in-development on cloud platforms
+# Assumes Ubuntu, edit the last line if targeting a different OS
+
+# Once SSH'd into the server, run:
+# `$ cd stack-orchestrator`
+# `$ git checkout <branch>`
+# `$ ./scripts/developer-mode-setup.sh`
+# `$ source ./venv/bin/activate`
+
+# Followed by the stack instructions.
+
+package_update: true
+package_upgrade: true
+
+groups:
+  - docker
+
+system_info:
+  default_user:
+    groups: [ docker ]
+
+packages:
+  - apt-transport-https
+  - ca-certificates
+  - curl
+  - jq
+  - git
+  - gnupg
+  - lsb-release
+  - unattended-upgrades
+  - python3.10-venv
+  - pip
+
+runcmd:
+  - mkdir -p /etc/apt/keyrings
+  - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
+  - echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
+  - apt-get update
+  - apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+  - systemctl enable docker
+  - systemctl start docker
+  - git clone https://github.com/cerc-io/stack-orchestrator.git /home/ubuntu/stack-orchestrator
@@ -0,0 +1,35 @@
+#cloud-config
+
+# Used for installing Stack Orchestrator on platforms that support `cloud-init`
+# Tested on Ubuntu
+
+package_update: true
+package_upgrade: true
+
+groups:
+  - docker
+
+system_info:
+  default_user:
+    groups: [ docker ]
+
+packages:
+  - apt-transport-https
+  - ca-certificates
+  - curl
+  - jq
+  - git
+  - gnupg
+  - lsb-release
+  - unattended-upgrades
+
+runcmd:
+  - mkdir -p /etc/apt/keyrings
+  - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
+  - echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
+  - apt-get update
+  - apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+  - systemctl enable docker
+  - systemctl start docker
+  - curl -L -o /usr/local/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
+  - chmod +x /usr/local/bin/laconic-so
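Once cloud-init has run the second configuration above, the released binary should be at `/usr/local/bin/laconic-so`. A quick post-install check on the target machine (a sketch; output will vary):

```bash
# Verify the Stack Orchestrator binary and the Docker engine installed by cloud-init
laconic-so version
docker --version
```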