fix: add pre-commit hooks and fix all lint/type/format errors

Process bug fix: no pre-commit configuration existed for this repo's Python code.
Added pyproject.toml with unified dependencies (ruff, mypy, ansible-lint),
.pre-commit-config.yaml with repo-based hooks (ruff) and local uv-run
hooks (mypy, ansible-lint).

Fixed 249 ruff errors (B023, B904, B006, B007, UP008, UP031, C408),
~13 mypy type errors, 11 ansible-lint violations, and ruff-format
across all Python files including stack-orchestrator subtree.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
fix/kind-mount-propagation
A. F. Dudley 2026-03-10 14:56:22 +00:00
parent 7f12270939
commit fdde3be5c8
52 changed files with 692 additions and 1221 deletions

View File

@ -1,2 +1,2 @@
Change this file to trigger running the test-database CI job Change this file to trigger running the test-database CI job
Trigger test run Trigger test run

View File

@ -1,12 +1,12 @@
# See # See
# https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78 # https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
from setuptools import setup, find_packages from setuptools import find_packages, setup
with open("README.md", "r", encoding="utf-8") as fh: with open("README.md", encoding="utf-8") as fh:
long_description = fh.read() long_description = fh.read()
with open("requirements.txt", "r", encoding="utf-8") as fh: with open("requirements.txt", encoding="utf-8") as fh:
requirements = fh.read() requirements = fh.read()
with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh: with open("stack_orchestrator/data/version.txt", encoding="utf-8") as fh:
version = fh.readlines()[-1].strip(" \n") version = fh.readlines()[-1].strip(" \n")
setup( setup(
name="laconic-stack-orchestrator", name="laconic-stack-orchestrator",

View File

@ -15,9 +15,11 @@
import os import os
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from stack_orchestrator.deploy.deploy import get_stack_status
from decouple import config from decouple import config
from stack_orchestrator.deploy.deploy import get_stack_status
def get_stack(config, stack): def get_stack(config, stack):
if stack == "package-registry": if stack == "package-registry":

View File

@ -22,17 +22,19 @@
# allow re-build of either all or specific containers # allow re-build of either all or specific containers
import os import os
import sys
from decouple import config
import subprocess import subprocess
import click import sys
from pathlib import Path from pathlib import Path
from stack_orchestrator.opts import opts
from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit import click
from decouple import config
from stack_orchestrator.base import get_npm_registry_url from stack_orchestrator.base import get_npm_registry_url
from stack_orchestrator.build.build_types import BuildContext from stack_orchestrator.build.build_types import BuildContext
from stack_orchestrator.build.publish import publish_image
from stack_orchestrator.build.build_util import get_containers_in_scope from stack_orchestrator.build.build_util import get_containers_in_scope
from stack_orchestrator.build.publish import publish_image
from stack_orchestrator.opts import opts
from stack_orchestrator.util import error_exit, include_exclude_check, stack_is_external
# TODO: find a place for this # TODO: find a place for this
# epilog="Config provided either in .env or settings.ini or env vars: # epilog="Config provided either in .env or settings.ini or env vars:
@ -59,9 +61,7 @@ def make_container_build_env(
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {}) container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
container_build_env.update( container_build_env.update(
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} {"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {}
if extra_build_args
else {}
) )
docker_host_env = os.getenv("DOCKER_HOST") docker_host_env = os.getenv("DOCKER_HOST")
if docker_host_env: if docker_host_env:
@ -81,12 +81,8 @@ def process_container(build_context: BuildContext) -> bool:
# Check if this is in an external stack # Check if this is in an external stack
if stack_is_external(build_context.stack): if stack_is_external(build_context.stack):
container_parent_dir = Path(build_context.stack).parent.parent.joinpath( container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build")
"container-build" temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
)
temp_build_dir = container_parent_dir.joinpath(
build_context.container.replace("/", "-")
)
temp_build_script_filename = temp_build_dir.joinpath("build.sh") temp_build_script_filename = temp_build_dir.joinpath("build.sh")
# Now check if the container exists in the external stack. # Now check if the container exists in the external stack.
if not temp_build_script_filename.exists(): if not temp_build_script_filename.exists():
@ -104,18 +100,13 @@ def process_container(build_context: BuildContext) -> bool:
build_command = build_script_filename.as_posix() build_command = build_script_filename.as_posix()
else: else:
if opts.o.verbose: if opts.o.verbose:
print( print(f"No script file found: {build_script_filename}, " "using default build script")
f"No script file found: {build_script_filename}, "
"using default build script"
)
repo_dir = build_context.container.split("/")[1] repo_dir = build_context.container.split("/")[1]
# TODO: make this less of a hack -- should be specified in # TODO: make this less of a hack -- should be specified in
# some metadata somewhere. Check if we have a repo for this # some metadata somewhere. Check if we have a repo for this
# container. If not, set the context dir to container-build subdir # container. If not, set the context dir to container-build subdir
repo_full_path = os.path.join(build_context.dev_root_path, repo_dir) repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
repo_dir_or_build_dir = ( repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
repo_full_path if os.path.exists(repo_full_path) else build_dir
)
build_command = ( build_command = (
os.path.join(build_context.container_build_dir, "default-build.sh") os.path.join(build_context.container_build_dir, "default-build.sh")
+ f" {default_container_tag} {repo_dir_or_build_dir}" + f" {default_container_tag} {repo_dir_or_build_dir}"
@ -159,9 +150,7 @@ def process_container(build_context: BuildContext) -> bool:
default=False, default=False,
help="Publish the built images in the specified image registry", help="Publish the built images in the specified image registry",
) )
@click.option( @click.option("--image-registry", help="Specify the image registry for --publish-images")
"--image-registry", help="Specify the image registry for --publish-images"
)
@click.pass_context @click.pass_context
def command( def command(
ctx, ctx,
@ -185,14 +174,9 @@ def command(
if local_stack: if local_stack:
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")] dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
print( print(f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: " f"{dev_root_path}")
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
f"{dev_root_path}"
)
else: else:
dev_root_path = os.path.expanduser( dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
config("CERC_REPO_BASE_DIR", default="~/cerc")
)
if not opts.o.quiet: if not opts.o.quiet:
print(f"Dev Root is: {dev_root_path}") print(f"Dev Root is: {dev_root_path}")
@ -230,10 +214,7 @@ def command(
else: else:
print(f"Error running build for {build_context.container}") print(f"Error running build for {build_context.container}")
if not opts.o.continue_on_error: if not opts.o.continue_on_error:
error_exit( error_exit("container build failed and --continue-on-error " "not set, exiting")
"container build failed and --continue-on-error "
"not set, exiting"
)
sys.exit(1) sys.exit(1)
else: else:
print( print(

View File

@ -18,15 +18,17 @@
# env vars: # env vars:
# CERC_REPO_BASE_DIR defaults to ~/cerc # CERC_REPO_BASE_DIR defaults to ~/cerc
import importlib.resources
import os import os
import sys import sys
from shutil import rmtree, copytree from shutil import copytree, rmtree
from decouple import config
import click import click
import importlib.resources from decouple import config
from python_on_whales import docker, DockerException from python_on_whales import DockerException, docker
from stack_orchestrator.base import get_stack from stack_orchestrator.base import get_stack
from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config from stack_orchestrator.util import get_parsed_stack_config, include_exclude_check
builder_js_image_name = "cerc/builder-js:local" builder_js_image_name = "cerc/builder-js:local"
@ -70,14 +72,9 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
if local_stack: if local_stack:
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")] dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
print( print(f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: " f"{dev_root_path}")
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
f"{dev_root_path}"
)
else: else:
dev_root_path = os.path.expanduser( dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
config("CERC_REPO_BASE_DIR", default="~/cerc")
)
build_root_path = os.path.join(dev_root_path, "build-trees") build_root_path = os.path.join(dev_root_path, "build-trees")
@ -94,9 +91,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
# See: https://stackoverflow.com/a/20885799/1701505 # See: https://stackoverflow.com/a/20885799/1701505
from stack_orchestrator import data from stack_orchestrator import data
with importlib.resources.open_text( with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
data, "npm-package-list.txt"
) as package_list_file:
all_packages = package_list_file.read().splitlines() all_packages = package_list_file.read().splitlines()
packages_in_scope = [] packages_in_scope = []
@ -132,8 +127,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
build_command = [ build_command = [
"sh", "sh",
"-c", "-c",
"cd /workspace && " "cd /workspace && " f"build-npm-package-local-dependencies.sh {npm_registry_url}",
f"build-npm-package-local-dependencies.sh {npm_registry_url}",
] ]
if not dry_run: if not dry_run:
if verbose: if verbose:
@ -151,9 +145,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {}) envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
envs.update( envs.update(
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} {"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {}
if extra_build_args
else {}
) )
try: try:
docker.run( docker.run(
@ -176,16 +168,10 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
except DockerException as e: except DockerException as e:
print(f"Error executing build for {package} in container:\n {e}") print(f"Error executing build for {package} in container:\n {e}")
if not continue_on_error: if not continue_on_error:
print( print("FATAL Error: build failed and --continue-on-error " "not set, exiting")
"FATAL Error: build failed and --continue-on-error "
"not set, exiting"
)
sys.exit(1) sys.exit(1)
else: else:
print( print("****** Build Error, continuing because " "--continue-on-error is set")
"****** Build Error, continuing because "
"--continue-on-error is set"
)
else: else:
print("Skipped") print("Skipped")
@ -203,10 +189,7 @@ def _ensure_prerequisites():
# Tell the user how to build it if not # Tell the user how to build it if not
images = docker.image.list(builder_js_image_name) images = docker.image.list(builder_js_image_name)
if len(images) == 0: if len(images) == 0:
print( print(f"FATAL: builder image: {builder_js_image_name} is required " "but was not found")
f"FATAL: builder image: {builder_js_image_name} is required "
"but was not found"
)
print( print(
"Please run this command to create it: " "Please run this command to create it: "
"laconic-so --stack build-support build-containers" "laconic-so --stack build-support build-containers"

View File

@ -16,7 +16,6 @@
from dataclasses import dataclass from dataclasses import dataclass
from pathlib import Path from pathlib import Path
from typing import Mapping
@dataclass @dataclass
@ -24,5 +23,5 @@ class BuildContext:
stack: str stack: str
container: str container: str
container_build_dir: Path container_build_dir: Path
container_build_env: Mapping[str, str] container_build_env: dict[str, str]
dev_root_path: str dev_root_path: str

View File

@ -30,9 +30,7 @@ def get_containers_in_scope(stack: str):
# See: https://stackoverflow.com/a/20885799/1701505 # See: https://stackoverflow.com/a/20885799/1701505
from stack_orchestrator import data from stack_orchestrator import data
with importlib.resources.open_text( with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
data, "container-image-list.txt"
) as container_list_file:
containers_in_scope = container_list_file.read().splitlines() containers_in_scope = container_list_file.read().splitlines()
if opts.o.verbose: if opts.o.verbose:

View File

@ -23,20 +23,19 @@
import os import os
import sys import sys
from decouple import config
import click
from pathlib import Path from pathlib import Path
import click
from decouple import config
from stack_orchestrator.build import build_containers from stack_orchestrator.build import build_containers
from stack_orchestrator.deploy.webapp.util import determine_base_container, TimedLogger
from stack_orchestrator.build.build_types import BuildContext from stack_orchestrator.build.build_types import BuildContext
from stack_orchestrator.deploy.webapp.util import TimedLogger, determine_base_container
@click.command() @click.command()
@click.option("--base-container") @click.option("--base-container")
@click.option( @click.option("--source-repo", help="directory containing the webapp to build", required=True)
"--source-repo", help="directory containing the webapp to build", required=True
)
@click.option( @click.option(
"--force-rebuild", "--force-rebuild",
is_flag=True, is_flag=True,
@ -64,13 +63,10 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
if local_stack: if local_stack:
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")] dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
logger.log( logger.log(
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: " f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: " f"{dev_root_path}"
f"{dev_root_path}"
) )
else: else:
dev_root_path = os.path.expanduser( dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
config("CERC_REPO_BASE_DIR", default="~/cerc")
)
if verbose: if verbose:
logger.log(f"Dev Root is: {dev_root_path}") logger.log(f"Dev Root is: {dev_root_path}")

View File

@ -13,19 +13,19 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import click
from dataclasses import dataclass
import json import json
import platform import platform
from dataclasses import dataclass
import click
import requests
from python_on_whales import DockerClient from python_on_whales import DockerClient
from python_on_whales.components.manifest.cli_wrapper import ManifestCLI, ManifestList from python_on_whales.components.manifest.cli_wrapper import ManifestCLI, ManifestList
from python_on_whales.utils import run from python_on_whales.utils import run
import requests
from typing import List
from stack_orchestrator.opts import opts
from stack_orchestrator.util import include_exclude_check, error_exit
from stack_orchestrator.build.build_util import get_containers_in_scope from stack_orchestrator.build.build_util import get_containers_in_scope
from stack_orchestrator.opts import opts
from stack_orchestrator.util import error_exit, include_exclude_check
# Experimental fetch-container command # Experimental fetch-container command
@ -55,7 +55,7 @@ def _local_tag_for(container: str):
# $ curl -u "my-username:my-token" -X GET \ # $ curl -u "my-username:my-token" -X GET \
# "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list" # "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]} # {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]: def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> list[str]:
# registry looks like: git.vdb.to/cerc-io # registry looks like: git.vdb.to/cerc-io
registry_parts = registry_info.registry.split("/") registry_parts = registry_info.registry.split("/")
url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list" url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
@ -68,16 +68,15 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
tag_info = response.json() tag_info = response.json()
if opts.o.debug: if opts.o.debug:
print(f"container tags list: {tag_info}") print(f"container tags list: {tag_info}")
tags_array = tag_info["tags"] tags_array: list[str] = tag_info["tags"]
return tags_array return tags_array
else: else:
error_exit( error_exit(
f"failed to fetch tags from image registry, " f"failed to fetch tags from image registry, " f"status code: {response.status_code}"
f"status code: {response.status_code}"
) )
def _find_latest(candidate_tags: List[str]): def _find_latest(candidate_tags: list[str]):
# Lex sort should give us the latest first # Lex sort should give us the latest first
sorted_candidates = sorted(candidate_tags) sorted_candidates = sorted(candidate_tags)
if opts.o.debug: if opts.o.debug:
@ -86,8 +85,8 @@ def _find_latest(candidate_tags: List[str]):
def _filter_for_platform( def _filter_for_platform(
container: str, registry_info: RegistryInfo, tag_list: List[str] container: str, registry_info: RegistryInfo, tag_list: list[str]
) -> List[str]: ) -> list[str]:
filtered_tags = [] filtered_tags = []
this_machine = platform.machine() this_machine = platform.machine()
# Translate between Python and docker platform names # Translate between Python and docker platform names
@ -151,15 +150,9 @@ def _add_local_tag(remote_tag: str, registry: str, local_tag: str):
default=False, default=False,
help="Overwrite a locally built image, if present", help="Overwrite a locally built image, if present",
) )
@click.option( @click.option("--image-registry", required=True, help="Specify the image registry to fetch from")
"--image-registry", required=True, help="Specify the image registry to fetch from" @click.option("--registry-username", required=True, help="Specify the image registry username")
) @click.option("--registry-token", required=True, help="Specify the image registry access token")
@click.option(
"--registry-username", required=True, help="Specify the image registry username"
)
@click.option(
"--registry-token", required=True, help="Specify the image registry access token"
)
@click.pass_context @click.pass_context
def command( def command(
ctx, ctx,

View File

@ -14,6 +14,7 @@
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
from datetime import datetime from datetime import datetime
from python_on_whales import DockerClient from python_on_whales import DockerClient
from stack_orchestrator.opts import opts from stack_orchestrator.opts import opts

View File

@ -2,12 +2,11 @@
import argparse import argparse
import os import os
import random
import sys import sys
from subprocess import Popen
import psycopg import psycopg
import random
from subprocess import Popen
from fabric import Connection from fabric import Connection
@ -27,27 +26,19 @@ def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_na
def establish_ssh_tunnel(ssh_host, ssh_port, ssh_user, db_host, db_port): def establish_ssh_tunnel(ssh_host, ssh_port, ssh_user, db_host, db_port):
local_port = random.randint(11000, 12000) local_port = random.randint(11000, 12000)
conn = Connection(host=ssh_host, port=ssh_port, user=ssh_user) conn = Connection(host=ssh_host, port=ssh_port, user=ssh_user)
fw = conn.forward_local( fw = conn.forward_local(local_port=local_port, remote_port=db_port, remote_host=db_host)
local_port=local_port, remote_port=db_port, remote_host=db_host
)
return conn, fw, local_port return conn, fw, local_port
def load_db_from_file(db_host, db_port, db_user, db_password, db_name, file_name): def load_db_from_file(db_host, db_port, db_user, db_password, db_name, file_name):
connstr = "host=%s port=%s user=%s password=%s sslmode=disable dbname=%s" % ( connstr = f"host={db_host} port={db_port} user={db_user} password={db_password} sslmode=disable dbname={db_name}"
db_host,
db_port,
db_user,
db_password,
db_name,
)
with psycopg.connect(connstr) as conn: with psycopg.connect(connstr) as conn:
with conn.cursor() as cur: with conn.cursor() as cur:
print( print(
f"Importing from {file_name} to {db_host}:{db_port}/{db_name}... ", f"Importing from {file_name} to {db_host}:{db_port}/{db_name}... ",
end="", end="",
) )
cur.execute(open(file_name, "rt").read()) cur.execute(open(file_name).read())
print("DONE") print("DONE")
@ -60,9 +51,7 @@ if __name__ == "__main__":
parser.add_argument("--src-dbpw", help="DB password", required=True) parser.add_argument("--src-dbpw", help="DB password", required=True)
parser.add_argument("--src-dbname", help="dbname", default="keycloak") parser.add_argument("--src-dbname", help="dbname", default="keycloak")
parser.add_argument( parser.add_argument("--dst-file", help="Destination filename", default="keycloak-mirror.sql")
"--dst-file", help="Destination filename", default="keycloak-mirror.sql"
)
parser.add_argument("--live-import", help="run the import", action="store_true") parser.add_argument("--live-import", help="run the import", action="store_true")

View File

@ -1,7 +1,8 @@
from web3.auto import w3
import ruamel.yaml as yaml
import sys import sys
import ruamel.yaml as yaml
from web3.auto import w3
w3.eth.account.enable_unaudited_hdwallet_features() w3.eth.account.enable_unaudited_hdwallet_features()
testnet_config_path = "genesis-config.yaml" testnet_config_path = "genesis-config.yaml"
@ -11,8 +12,6 @@ if len(sys.argv) > 1:
with open(testnet_config_path) as stream: with open(testnet_config_path) as stream:
data = yaml.safe_load(stream) data = yaml.safe_load(stream)
for key, value in data["el_premine"].items(): for key, _value in data["el_premine"].items():
acct = w3.eth.account.from_mnemonic( acct = w3.eth.account.from_mnemonic(data["mnemonic"], account_path=key, passphrase="")
data["mnemonic"], account_path=key, passphrase="" print(f"{key},{acct.address},{acct.key.hex()}")
)
print("%s,%s,%s" % (key, acct.address, acct.key.hex()))

View File

@ -16,13 +16,14 @@
from pathlib import Path from pathlib import Path
from shutil import copy from shutil import copy
import yaml import yaml
def create(context, extra_args): def create(context, extra_args):
# Our goal here is just to copy the json files for blast # Our goal here is just to copy the json files for blast
yml_path = context.deployment_dir.joinpath("spec.yml") yml_path = context.deployment_dir.joinpath("spec.yml")
with open(yml_path, "r") as file: with open(yml_path) as file:
data = yaml.safe_load(file) data = yaml.safe_load(file)
mount_point = data["volumes"]["blast-data"] mount_point = data["volumes"]["blast-data"]

View File

@ -27,8 +27,6 @@ def setup(ctx):
def create(ctx, extra_args): def create(ctx, extra_args):
# Generate the JWT secret and save to its config file # Generate the JWT secret and save to its config file
secret = token_hex(32) secret = token_hex(32)
jwt_file_path = ctx.deployment_dir.joinpath( jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_config_data", "jwtsecret")
"data", "mainnet_eth_config_data", "jwtsecret"
)
with open(jwt_file_path, "w+") as jwt_file: with open(jwt_file_path, "w+") as jwt_file:
jwt_file.write(secret) jwt_file.write(secret)

View File

@ -13,22 +13,23 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
from stack_orchestrator.util import get_yaml import os
import re
import sys
from enum import Enum
from pathlib import Path
from shutil import copyfile, copytree
import tomli
from stack_orchestrator.deploy.deploy_types import ( from stack_orchestrator.deploy.deploy_types import (
DeployCommandContext, DeployCommandContext,
LaconicStackSetupCommand, LaconicStackSetupCommand,
) )
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
from stack_orchestrator.deploy.deployment_context import DeploymentContext from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.deploy.stack_state import State from stack_orchestrator.deploy.stack_state import State
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
from stack_orchestrator.opts import opts from stack_orchestrator.opts import opts
from enum import Enum from stack_orchestrator.util import get_yaml
from pathlib import Path
from shutil import copyfile, copytree
import os
import sys
import tomli
import re
default_spec_file_content = "" default_spec_file_content = ""
@ -80,9 +81,7 @@ def _copy_gentx_files(network_dir: Path, gentx_file_list: str):
gentx_file_path = Path(gentx_file) gentx_file_path = Path(gentx_file)
copyfile( copyfile(
gentx_file_path, gentx_file_path,
os.path.join( os.path.join(network_dir, "config", "gentx", os.path.basename(gentx_file_path)),
network_dir, "config", "gentx", os.path.basename(gentx_file_path)
),
) )
@ -91,7 +90,7 @@ def _remove_persistent_peers(network_dir: Path):
if not config_file_path.exists(): if not config_file_path.exists():
print("Error: config.toml not found") print("Error: config.toml not found")
sys.exit(1) sys.exit(1)
with open(config_file_path, "r") as input_file: with open(config_file_path) as input_file:
config_file_content = input_file.read() config_file_content = input_file.read()
persistent_peers_pattern = '^persistent_peers = "(.+?)"' persistent_peers_pattern = '^persistent_peers = "(.+?)"'
replace_with = 'persistent_peers = ""' replace_with = 'persistent_peers = ""'
@ -110,7 +109,7 @@ def _insert_persistent_peers(config_dir: Path, new_persistent_peers: str):
if not config_file_path.exists(): if not config_file_path.exists():
print("Error: config.toml not found") print("Error: config.toml not found")
sys.exit(1) sys.exit(1)
with open(config_file_path, "r") as input_file: with open(config_file_path) as input_file:
config_file_content = input_file.read() config_file_content = input_file.read()
persistent_peers_pattern = r'^persistent_peers = ""' persistent_peers_pattern = r'^persistent_peers = ""'
replace_with = f'persistent_peers = "{new_persistent_peers}"' replace_with = f'persistent_peers = "{new_persistent_peers}"'
@ -129,7 +128,7 @@ def _enable_cors(config_dir: Path):
if not config_file_path.exists(): if not config_file_path.exists():
print("Error: config.toml not found") print("Error: config.toml not found")
sys.exit(1) sys.exit(1)
with open(config_file_path, "r") as input_file: with open(config_file_path) as input_file:
config_file_content = input_file.read() config_file_content = input_file.read()
cors_pattern = r"^cors_allowed_origins = \[]" cors_pattern = r"^cors_allowed_origins = \[]"
replace_with = 'cors_allowed_origins = ["*"]' replace_with = 'cors_allowed_origins = ["*"]'
@ -142,13 +141,11 @@ def _enable_cors(config_dir: Path):
if not app_file_path.exists(): if not app_file_path.exists():
print("Error: app.toml not found") print("Error: app.toml not found")
sys.exit(1) sys.exit(1)
with open(app_file_path, "r") as input_file: with open(app_file_path) as input_file:
app_file_content = input_file.read() app_file_content = input_file.read()
cors_pattern = r"^enabled-unsafe-cors = false" cors_pattern = r"^enabled-unsafe-cors = false"
replace_with = "enabled-unsafe-cors = true" replace_with = "enabled-unsafe-cors = true"
app_file_content = re.sub( app_file_content = re.sub(cors_pattern, replace_with, app_file_content, flags=re.MULTILINE)
cors_pattern, replace_with, app_file_content, flags=re.MULTILINE
)
with open(app_file_path, "w") as output_file: with open(app_file_path, "w") as output_file:
output_file.write(app_file_content) output_file.write(app_file_content)
@ -158,7 +155,7 @@ def _set_listen_address(config_dir: Path):
if not config_file_path.exists(): if not config_file_path.exists():
print("Error: config.toml not found") print("Error: config.toml not found")
sys.exit(1) sys.exit(1)
with open(config_file_path, "r") as input_file: with open(config_file_path) as input_file:
config_file_content = input_file.read() config_file_content = input_file.read()
existing_pattern = r'^laddr = "tcp://127.0.0.1:26657"' existing_pattern = r'^laddr = "tcp://127.0.0.1:26657"'
replace_with = 'laddr = "tcp://0.0.0.0:26657"' replace_with = 'laddr = "tcp://0.0.0.0:26657"'
@ -172,7 +169,7 @@ def _set_listen_address(config_dir: Path):
if not app_file_path.exists(): if not app_file_path.exists():
print("Error: app.toml not found") print("Error: app.toml not found")
sys.exit(1) sys.exit(1)
with open(app_file_path, "r") as input_file: with open(app_file_path) as input_file:
app_file_content = input_file.read() app_file_content = input_file.read()
existing_pattern1 = r'^address = "tcp://localhost:1317"' existing_pattern1 = r'^address = "tcp://localhost:1317"'
replace_with1 = 'address = "tcp://0.0.0.0:1317"' replace_with1 = 'address = "tcp://0.0.0.0:1317"'
@ -192,10 +189,7 @@ def _phase_from_params(parameters):
phase = SetupPhase.ILLEGAL phase = SetupPhase.ILLEGAL
if parameters.initialize_network: if parameters.initialize_network:
if parameters.join_network or parameters.create_network: if parameters.join_network or parameters.create_network:
print( print("Can't supply --join-network or --create-network " "with --initialize-network")
"Can't supply --join-network or --create-network "
"with --initialize-network"
)
sys.exit(1) sys.exit(1)
if not parameters.chain_id: if not parameters.chain_id:
print("--chain-id is required") print("--chain-id is required")
@ -207,26 +201,17 @@ def _phase_from_params(parameters):
phase = SetupPhase.INITIALIZE phase = SetupPhase.INITIALIZE
elif parameters.join_network: elif parameters.join_network:
if parameters.initialize_network or parameters.create_network: if parameters.initialize_network or parameters.create_network:
print( print("Can't supply --initialize-network or --create-network " "with --join-network")
"Can't supply --initialize-network or --create-network "
"with --join-network"
)
sys.exit(1) sys.exit(1)
phase = SetupPhase.JOIN phase = SetupPhase.JOIN
elif parameters.create_network: elif parameters.create_network:
if parameters.initialize_network or parameters.join_network: if parameters.initialize_network or parameters.join_network:
print( print("Can't supply --initialize-network or --join-network " "with --create-network")
"Can't supply --initialize-network or --join-network "
"with --create-network"
)
sys.exit(1) sys.exit(1)
phase = SetupPhase.CREATE phase = SetupPhase.CREATE
elif parameters.connect_network: elif parameters.connect_network:
if parameters.initialize_network or parameters.join_network: if parameters.initialize_network or parameters.join_network:
print( print("Can't supply --initialize-network or --join-network " "with --connect-network")
"Can't supply --initialize-network or --join-network "
"with --connect-network"
)
sys.exit(1) sys.exit(1)
phase = SetupPhase.CONNECT phase = SetupPhase.CONNECT
return phase return phase
@ -341,8 +326,7 @@ def setup(
output3, status3 = run_container_command( output3, status3 = run_container_command(
command_context, command_context,
"laconicd", "laconicd",
f"laconicd cometbft show-validator " f"laconicd cometbft show-validator " f"--home {laconicd_home_path_in_container}",
f"--home {laconicd_home_path_in_container}",
mounts, mounts,
) )
print(f"Node validator address: {output3}") print(f"Node validator address: {output3}")
@ -361,23 +345,16 @@ def setup(
# Copy it into our network dir # Copy it into our network dir
genesis_file_path = Path(parameters.genesis_file) genesis_file_path = Path(parameters.genesis_file)
if not os.path.exists(genesis_file_path): if not os.path.exists(genesis_file_path):
print( print(f"Error: supplied genesis file: {parameters.genesis_file} " "does not exist.")
f"Error: supplied genesis file: {parameters.genesis_file} "
"does not exist."
)
sys.exit(1) sys.exit(1)
copyfile( copyfile(
genesis_file_path, genesis_file_path,
os.path.join( os.path.join(network_dir, "config", os.path.basename(genesis_file_path)),
network_dir, "config", os.path.basename(genesis_file_path)
),
) )
else: else:
# We're generating the genesis file # We're generating the genesis file
# First look in the supplied gentx files for the other nodes' keys # First look in the supplied gentx files for the other nodes' keys
other_node_keys = _get_node_keys_from_gentx_files( other_node_keys = _get_node_keys_from_gentx_files(parameters.gentx_address_list)
parameters.gentx_address_list
)
# Add those keys to our genesis, with balances we determine here (why?) # Add those keys to our genesis, with balances we determine here (why?)
outputk = None outputk = None
for other_node_key in other_node_keys: for other_node_key in other_node_keys:
@ -398,8 +375,7 @@ def setup(
output1, status1 = run_container_command( output1, status1 = run_container_command(
command_context, command_context,
"laconicd", "laconicd",
f"laconicd genesis collect-gentxs " f"laconicd genesis collect-gentxs " f"--home {laconicd_home_path_in_container}",
f"--home {laconicd_home_path_in_container}",
mounts, mounts,
) )
if options.debug: if options.debug:
@ -416,8 +392,7 @@ def setup(
output2, status1 = run_container_command( output2, status1 = run_container_command(
command_context, command_context,
"laconicd", "laconicd",
f"laconicd genesis validate-genesis " f"laconicd genesis validate-genesis " f"--home {laconicd_home_path_in_container}",
f"--home {laconicd_home_path_in_container}",
mounts, mounts,
) )
print(f"validate-genesis result: {output2}") print(f"validate-genesis result: {output2}")
@ -452,9 +427,7 @@ def create(deployment_context: DeploymentContext, extra_args):
sys.exit(1) sys.exit(1)
# Copy the network directory contents into our deployment # Copy the network directory contents into our deployment
# TODO: change this to work with non local paths # TODO: change this to work with non local paths
deployment_config_dir = deployment_context.deployment_dir.joinpath( deployment_config_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-config")
"data", "laconicd-config"
)
copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True) copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True)
# If supplied, add the initial persistent peers to the config file # If supplied, add the initial persistent peers to the config file
if extra_args[1]: if extra_args[1]:
@ -465,9 +438,7 @@ def create(deployment_context: DeploymentContext, extra_args):
_set_listen_address(deployment_config_dir) _set_listen_address(deployment_config_dir)
# Copy the data directory contents into our deployment # Copy the data directory contents into our deployment
# TODO: change this to work with non local paths # TODO: change this to work with non local paths
deployment_data_dir = deployment_context.deployment_dir.joinpath( deployment_data_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-data")
"data", "laconicd-data"
)
copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True) copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True)

View File

@ -13,12 +13,13 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
from stack_orchestrator.util import get_yaml from pathlib import Path
from stack_orchestrator.deploy.deploy_types import DeployCommandContext from stack_orchestrator.deploy.deploy_types import DeployCommandContext
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
from stack_orchestrator.deploy.deployment_context import DeploymentContext from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.deploy.stack_state import State from stack_orchestrator.deploy.stack_state import State
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command from stack_orchestrator.util import get_yaml
from pathlib import Path
default_spec_file_content = """config: default_spec_file_content = """config:
test-variable-1: test-value-1 test-variable-1: test-value-1

View File

@ -14,12 +14,13 @@
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
from pathlib import Path from pathlib import Path
from typing import Optional
from python_on_whales import DockerClient, DockerException from python_on_whales import DockerClient, DockerException
from stack_orchestrator.deploy.deployer import ( from stack_orchestrator.deploy.deployer import (
Deployer, Deployer,
DeployerException,
DeployerConfigGenerator, DeployerConfigGenerator,
DeployerException,
) )
from stack_orchestrator.deploy.deployment_context import DeploymentContext from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.opts import opts from stack_orchestrator.opts import opts
@ -32,10 +33,10 @@ class DockerDeployer(Deployer):
def __init__( def __init__(
self, self,
type: str, type: str,
deployment_context: Optional[DeploymentContext], deployment_context: DeploymentContext | None,
compose_files: list, compose_files: list,
compose_project_name: Optional[str], compose_project_name: str | None,
compose_env_file: Optional[str], compose_env_file: str | None,
) -> None: ) -> None:
self.docker = DockerClient( self.docker = DockerClient(
compose_files=compose_files, compose_files=compose_files,
@ -53,21 +54,21 @@ class DockerDeployer(Deployer):
try: try:
return self.docker.compose.up(detach=detach, services=services) return self.docker.compose.up(detach=detach, services=services)
except DockerException as e: except DockerException as e:
raise DeployerException(e) raise DeployerException(e) from e
def down(self, timeout, volumes, skip_cluster_management): def down(self, timeout, volumes, skip_cluster_management):
if not opts.o.dry_run: if not opts.o.dry_run:
try: try:
return self.docker.compose.down(timeout=timeout, volumes=volumes) return self.docker.compose.down(timeout=timeout, volumes=volumes)
except DockerException as e: except DockerException as e:
raise DeployerException(e) raise DeployerException(e) from e
def update_envs(self): def update_envs(self):
if not opts.o.dry_run: if not opts.o.dry_run:
try: try:
return self.docker.compose.restart() return self.docker.compose.restart()
except DockerException as e: except DockerException as e:
raise DeployerException(e) raise DeployerException(e) from e
def status(self): def status(self):
if not opts.o.dry_run: if not opts.o.dry_run:
@ -75,23 +76,21 @@ class DockerDeployer(Deployer):
for p in self.docker.compose.ps(): for p in self.docker.compose.ps():
print(f"{p.name}\t{p.state.status}") print(f"{p.name}\t{p.state.status}")
except DockerException as e: except DockerException as e:
raise DeployerException(e) raise DeployerException(e) from e
def ps(self): def ps(self):
if not opts.o.dry_run: if not opts.o.dry_run:
try: try:
return self.docker.compose.ps() return self.docker.compose.ps()
except DockerException as e: except DockerException as e:
raise DeployerException(e) raise DeployerException(e) from e
def port(self, service, private_port): def port(self, service, private_port):
if not opts.o.dry_run: if not opts.o.dry_run:
try: try:
return self.docker.compose.port( return self.docker.compose.port(service=service, private_port=private_port)
service=service, private_port=private_port
)
except DockerException as e: except DockerException as e:
raise DeployerException(e) raise DeployerException(e) from e
def execute(self, service, command, tty, envs): def execute(self, service, command, tty, envs):
if not opts.o.dry_run: if not opts.o.dry_run:
@ -100,7 +99,7 @@ class DockerDeployer(Deployer):
service=service, command=command, tty=tty, envs=envs service=service, command=command, tty=tty, envs=envs
) )
except DockerException as e: except DockerException as e:
raise DeployerException(e) raise DeployerException(e) from e
def logs(self, services, tail, follow, stream): def logs(self, services, tail, follow, stream):
if not opts.o.dry_run: if not opts.o.dry_run:
@ -109,7 +108,7 @@ class DockerDeployer(Deployer):
services=services, tail=tail, follow=follow, stream=stream services=services, tail=tail, follow=follow, stream=stream
) )
except DockerException as e: except DockerException as e:
raise DeployerException(e) raise DeployerException(e) from e
def run( def run(
self, self,
@ -118,10 +117,14 @@ class DockerDeployer(Deployer):
user=None, user=None,
volumes=None, volumes=None,
entrypoint=None, entrypoint=None,
env={}, env=None,
ports=[], ports=None,
detach=False, detach=False,
): ):
if ports is None:
ports = []
if env is None:
env = {}
if not opts.o.dry_run: if not opts.o.dry_run:
try: try:
return self.docker.run( return self.docker.run(
@ -136,9 +139,9 @@ class DockerDeployer(Deployer):
publish_all=len(ports) == 0, publish_all=len(ports) == 0,
) )
except DockerException as e: except DockerException as e:
raise DeployerException(e) raise DeployerException(e) from e
def run_job(self, job_name: str, release_name: Optional[str] = None): def run_job(self, job_name: str, release_name: str | None = None):
# release_name is ignored for Docker deployments (only used for K8s/Helm) # release_name is ignored for Docker deployments (only used for K8s/Helm)
if not opts.o.dry_run: if not opts.o.dry_run:
try: try:
@ -155,9 +158,7 @@ class DockerDeployer(Deployer):
) )
if not job_compose_file.exists(): if not job_compose_file.exists():
raise DeployerException( raise DeployerException(f"Job compose file not found: {job_compose_file}")
f"Job compose file not found: {job_compose_file}"
)
if opts.o.verbose: if opts.o.verbose:
print(f"Running job from: {job_compose_file}") print(f"Running job from: {job_compose_file}")
@ -175,7 +176,7 @@ class DockerDeployer(Deployer):
return job_docker.compose.run(service=job_name, remove=True, tty=True) return job_docker.compose.run(service=job_name, remove=True, tty=True)
except DockerException as e: except DockerException as e:
raise DeployerException(e) raise DeployerException(e) from e
class DockerDeployerConfigGenerator(DeployerConfigGenerator): class DockerDeployerConfigGenerator(DeployerConfigGenerator):

View File

@ -15,36 +15,37 @@
# Deploys the system components using a deployer (either docker-compose or k8s) # Deploys the system components using a deployer (either docker-compose or k8s)
import hashlib
import copy import copy
import hashlib
import os import os
import subprocess
import sys import sys
from dataclasses import dataclass from dataclasses import dataclass
from importlib import resources from importlib import resources
from typing import Optional
import subprocess
import click
from pathlib import Path from pathlib import Path
import click
from stack_orchestrator import constants from stack_orchestrator import constants
from stack_orchestrator.opts import opts
from stack_orchestrator.util import (
get_stack_path,
include_exclude_check,
get_parsed_stack_config,
global_options2,
get_dev_root_path,
stack_is_in_deployment,
resolve_compose_file,
)
from stack_orchestrator.deploy.deployer import DeployerException
from stack_orchestrator.deploy.deployer_factory import getDeployer
from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer
from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext
from stack_orchestrator.deploy.deployer import DeployerException
from stack_orchestrator.deploy.deployer_factory import getDeployer
from stack_orchestrator.deploy.deployment_context import DeploymentContext from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.deploy.deployment_create import create as deployment_create from stack_orchestrator.deploy.deployment_create import create as deployment_create
from stack_orchestrator.deploy.deployment_create import init as deployment_init from stack_orchestrator.deploy.deployment_create import init as deployment_init
from stack_orchestrator.deploy.deployment_create import setup as deployment_setup from stack_orchestrator.deploy.deployment_create import setup as deployment_setup
from stack_orchestrator.deploy.k8s import k8s_command from stack_orchestrator.deploy.k8s import k8s_command
from stack_orchestrator.opts import opts
from stack_orchestrator.util import (
get_dev_root_path,
get_parsed_stack_config,
get_stack_path,
global_options2,
include_exclude_check,
resolve_compose_file,
stack_is_in_deployment,
)
@click.group() @click.group()
@ -52,9 +53,7 @@ from stack_orchestrator.deploy.k8s import k8s_command
@click.option("--exclude", help="don't start these components") @click.option("--exclude", help="don't start these components")
@click.option("--env-file", help="env file to be used") @click.option("--env-file", help="env file to be used")
@click.option("--cluster", help="specify a non-default cluster name") @click.option("--cluster", help="specify a non-default cluster name")
@click.option( @click.option("--deploy-to", help="cluster system to deploy to (compose or k8s or k8s-kind)")
"--deploy-to", help="cluster system to deploy to (compose or k8s or k8s-kind)"
)
@click.pass_context @click.pass_context
def command(ctx, include, exclude, env_file, cluster, deploy_to): def command(ctx, include, exclude, env_file, cluster, deploy_to):
"""deploy a stack""" """deploy a stack"""
@ -93,7 +92,7 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to):
def create_deploy_context( def create_deploy_context(
global_context, global_context,
deployment_context: Optional[DeploymentContext], deployment_context: DeploymentContext | None,
stack, stack,
include, include,
exclude, exclude,
@ -116,9 +115,7 @@ def create_deploy_context(
# For helm chart deployments, skip compose file loading # For helm chart deployments, skip compose file loading
if is_helm_chart_deployment: if is_helm_chart_deployment:
cluster_context = ClusterContext( cluster_context = ClusterContext(global_context, cluster, [], [], [], None, env_file)
global_context, cluster, [], [], [], None, env_file
)
else: else:
cluster_context = _make_cluster_context( cluster_context = _make_cluster_context(
global_context, stack, include, exclude, cluster, env_file global_context, stack, include, exclude, cluster, env_file
@ -134,9 +131,7 @@ def create_deploy_context(
return DeployCommandContext(stack, cluster_context, deployer) return DeployCommandContext(stack, cluster_context, deployer)
def up_operation( def up_operation(ctx, services_list, stay_attached=False, skip_cluster_management=False):
ctx, services_list, stay_attached=False, skip_cluster_management=False
):
global_context = ctx.parent.parent.obj global_context = ctx.parent.parent.obj
deploy_context = ctx.obj deploy_context = ctx.obj
cluster_context = deploy_context.cluster_context cluster_context = deploy_context.cluster_context
@ -209,8 +204,7 @@ def ps_operation(ctx):
print(f"{port_mapping}", end="") print(f"{port_mapping}", end="")
else: else:
print( print(
f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}" f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}" f"->{port_mapping}",
f"->{port_mapping}",
end="", end="",
) )
comma = ", " comma = ", "
@ -260,11 +254,11 @@ def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
logs_stream = ctx.obj.deployer.logs( logs_stream = ctx.obj.deployer.logs(
services=services_list, tail=tail, follow=follow, stream=True services=services_list, tail=tail, follow=follow, stream=True
) )
for stream_type, stream_content in logs_stream: for _stream_type, stream_content in logs_stream:
print(stream_content.decode("utf-8"), end="") print(stream_content.decode("utf-8"), end="")
def run_job_operation(ctx, job_name: str, helm_release: Optional[str] = None): def run_job_operation(ctx, job_name: str, helm_release: str | None = None):
global_context = ctx.parent.parent.obj global_context = ctx.parent.parent.obj
if not global_context.dry_run: if not global_context.dry_run:
print(f"Running job: {job_name}") print(f"Running job: {job_name}")
@ -284,9 +278,7 @@ def up(ctx, extra_args):
@command.command() @command.command()
@click.option( @click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
"--delete-volumes/--preserve-volumes", default=False, help="delete data volumes"
)
@click.argument("extra_args", nargs=-1) # help: command: down<service1> <service2> @click.argument("extra_args", nargs=-1) # help: command: down<service1> <service2>
@click.pass_context @click.pass_context
def down(ctx, delete_volumes, extra_args): def down(ctx, delete_volumes, extra_args):
@ -386,14 +378,10 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
else: else:
# See: # See:
# https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure # https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
compose_dir = ( compose_dir = Path(__file__).absolute().parent.parent.joinpath("data", "compose")
Path(__file__).absolute().parent.parent.joinpath("data", "compose")
)
if cluster is None: if cluster is None:
cluster = _make_default_cluster_name( cluster = _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
deployment, compose_dir, stack, include, exclude
)
else: else:
_make_default_cluster_name(deployment, compose_dir, stack, include, exclude) _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
@ -410,9 +398,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
if stack_config is not None: if stack_config is not None:
# TODO: syntax check the input here # TODO: syntax check the input here
pods_in_scope = stack_config["pods"] pods_in_scope = stack_config["pods"]
cluster_config = ( cluster_config = stack_config["config"] if "config" in stack_config else None
stack_config["config"] if "config" in stack_config else None
)
else: else:
pods_in_scope = all_pods pods_in_scope = all_pods
@ -434,43 +420,29 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
if include_exclude_check(pod_name, include, exclude): if include_exclude_check(pod_name, include, exclude):
if pod_repository is None or pod_repository == "internal": if pod_repository is None or pod_repository == "internal":
if deployment: if deployment:
compose_file_name = os.path.join( compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
compose_dir, f"docker-compose-{pod_path}.yml"
)
else: else:
compose_file_name = resolve_compose_file(stack, pod_name) compose_file_name = resolve_compose_file(stack, pod_name)
else: else:
if deployment: if deployment:
compose_file_name = os.path.join( compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml")
compose_dir, f"docker-compose-{pod_name}.yml"
)
pod_pre_start_command = pod.get("pre_start_command") pod_pre_start_command = pod.get("pre_start_command")
pod_post_start_command = pod.get("post_start_command") pod_post_start_command = pod.get("post_start_command")
script_dir = compose_dir.parent.joinpath( script_dir = compose_dir.parent.joinpath("pods", pod_name, "scripts")
"pods", pod_name, "scripts"
)
if pod_pre_start_command is not None: if pod_pre_start_command is not None:
pre_start_commands.append( pre_start_commands.append(os.path.join(script_dir, pod_pre_start_command))
os.path.join(script_dir, pod_pre_start_command)
)
if pod_post_start_command is not None: if pod_post_start_command is not None:
post_start_commands.append( post_start_commands.append(os.path.join(script_dir, pod_post_start_command))
os.path.join(script_dir, pod_post_start_command)
)
else: else:
# TODO: fix this code for external stack with scripts # TODO: fix this code for external stack with scripts
pod_root_dir = os.path.join( pod_root_dir = os.path.join(
dev_root_path, pod_repository.split("/")[-1], pod["path"] dev_root_path, pod_repository.split("/")[-1], pod["path"]
) )
compose_file_name = os.path.join( compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml")
pod_root_dir, f"docker-compose-{pod_name}.yml"
)
pod_pre_start_command = pod.get("pre_start_command") pod_pre_start_command = pod.get("pre_start_command")
pod_post_start_command = pod.get("post_start_command") pod_post_start_command = pod.get("post_start_command")
if pod_pre_start_command is not None: if pod_pre_start_command is not None:
pre_start_commands.append( pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command))
os.path.join(pod_root_dir, pod_pre_start_command)
)
if pod_post_start_command is not None: if pod_post_start_command is not None:
post_start_commands.append( post_start_commands.append(
os.path.join(pod_root_dir, pod_post_start_command) os.path.join(pod_root_dir, pod_post_start_command)
@ -514,9 +486,7 @@ def _run_command(ctx, cluster_name, command):
command_env["CERC_SO_COMPOSE_PROJECT"] = cluster_name command_env["CERC_SO_COMPOSE_PROJECT"] = cluster_name
if ctx.debug: if ctx.debug:
command_env["CERC_SCRIPT_DEBUG"] = "true" command_env["CERC_SCRIPT_DEBUG"] = "true"
command_result = subprocess.run( command_result = subprocess.run(command_file, shell=True, env=command_env, cwd=command_dir)
command_file, shell=True, env=command_env, cwd=command_dir
)
if command_result.returncode != 0: if command_result.returncode != 0:
print(f"FATAL Error running command: {command}") print(f"FATAL Error running command: {command}")
sys.exit(1) sys.exit(1)
@ -573,9 +543,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en
# "It returned with code 1" # "It returned with code 1"
if "It returned with code 1" in str(error): if "It returned with code 1" in str(error):
if ctx.verbose: if ctx.verbose:
print( print("Config export script returned an error, re-trying")
"Config export script returned an error, re-trying"
)
# If the script failed to execute # If the script failed to execute
# (e.g. the file is not there) then we get: # (e.g. the file is not there) then we get:
# "It returned with code 2" # "It returned with code 2"

View File

@ -13,8 +13,9 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
from typing import List, Mapping, Optional from collections.abc import Mapping
from dataclasses import dataclass from dataclasses import dataclass
from stack_orchestrator.command_types import CommandOptions from stack_orchestrator.command_types import CommandOptions
from stack_orchestrator.deploy.deployer import Deployer from stack_orchestrator.deploy.deployer import Deployer
@ -23,19 +24,19 @@ from stack_orchestrator.deploy.deployer import Deployer
class ClusterContext: class ClusterContext:
# TODO: this should be in its own object not stuffed in here # TODO: this should be in its own object not stuffed in here
options: CommandOptions options: CommandOptions
cluster: Optional[str] cluster: str | None
compose_files: List[str] compose_files: list[str]
pre_start_commands: List[str] pre_start_commands: list[str]
post_start_commands: List[str] post_start_commands: list[str]
config: Optional[str] config: str | None
env_file: Optional[str] env_file: str | None
@dataclass @dataclass
class DeployCommandContext: class DeployCommandContext:
stack: str stack: str
cluster_context: ClusterContext cluster_context: ClusterContext
deployer: Optional[Deployer] deployer: Deployer | None
@dataclass @dataclass

View File

@ -13,15 +13,16 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
from typing import List, Any from typing import Any
from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
from stack_orchestrator.opts import opts
from stack_orchestrator.util import ( from stack_orchestrator.util import (
get_parsed_stack_config, get_parsed_stack_config,
get_yaml,
get_pod_list, get_pod_list,
get_yaml,
resolve_compose_file, resolve_compose_file,
) )
from stack_orchestrator.opts import opts
def _container_image_from_service(stack: str, service: str): def _container_image_from_service(stack: str, service: str):
@ -32,7 +33,7 @@ def _container_image_from_service(stack: str, service: str):
yaml = get_yaml() yaml = get_yaml()
for pod in pods: for pod in pods:
pod_file_path = resolve_compose_file(stack, pod) pod_file_path = resolve_compose_file(stack, pod)
parsed_pod_file = yaml.load(open(pod_file_path, "r")) parsed_pod_file = yaml.load(open(pod_file_path))
if "services" in parsed_pod_file: if "services" in parsed_pod_file:
services = parsed_pod_file["services"] services = parsed_pod_file["services"]
if service in services: if service in services:
@ -45,7 +46,7 @@ def _container_image_from_service(stack: str, service: str):
def parsed_pod_files_map_from_file_names(pod_files): def parsed_pod_files_map_from_file_names(pod_files):
parsed_pod_yaml_map: Any = {} parsed_pod_yaml_map: Any = {}
for pod_file in pod_files: for pod_file in pod_files:
with open(pod_file, "r") as pod_file_descriptor: with open(pod_file) as pod_file_descriptor:
parsed_pod_file = get_yaml().load(pod_file_descriptor) parsed_pod_file = get_yaml().load(pod_file_descriptor)
parsed_pod_yaml_map[pod_file] = parsed_pod_file parsed_pod_yaml_map[pod_file] = parsed_pod_file
if opts.o.debug: if opts.o.debug:
@ -53,7 +54,7 @@ def parsed_pod_files_map_from_file_names(pod_files):
return parsed_pod_yaml_map return parsed_pod_yaml_map
def images_for_deployment(pod_files: List[str]): def images_for_deployment(pod_files: list[str]):
image_set = set() image_set = set()
parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files) parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
# Find the set of images in the pods # Find the set of images in the pods
@ -69,7 +70,7 @@ def images_for_deployment(pod_files: List[str]):
return image_set return image_set
def _volumes_to_docker(mounts: List[VolumeMapping]): def _volumes_to_docker(mounts: list[VolumeMapping]):
# Example from doc: [("/", "/host"), ("/etc/hosts", "/etc/hosts", "rw")] # Example from doc: [("/", "/host"), ("/etc/hosts", "/etc/hosts", "rw")]
result = [] result = []
for mount in mounts: for mount in mounts:
@ -79,7 +80,7 @@ def _volumes_to_docker(mounts: List[VolumeMapping]):
def run_container_command( def run_container_command(
ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping] ctx: DeployCommandContext, service: str, command: str, mounts: list[VolumeMapping]
): ):
deployer = ctx.deployer deployer = ctx.deployer
if deployer is None: if deployer is None:

View File

@ -15,7 +15,6 @@
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from pathlib import Path from pathlib import Path
from typing import Optional
class Deployer(ABC): class Deployer(ABC):
@ -59,14 +58,14 @@ class Deployer(ABC):
user=None, user=None,
volumes=None, volumes=None,
entrypoint=None, entrypoint=None,
env={}, env=None,
ports=[], ports=None,
detach=False, detach=False,
): ):
pass pass
@abstractmethod @abstractmethod
def run_job(self, job_name: str, release_name: Optional[str] = None): def run_job(self, job_name: str, release_name: str | None = None):
pass pass
def prepare(self, skip_cluster_management): def prepare(self, skip_cluster_management):
@ -74,9 +73,7 @@ class Deployer(ABC):
Only supported for k8s deployers. Compose deployers raise an error. Only supported for k8s deployers. Compose deployers raise an error.
""" """
raise DeployerException( raise DeployerException("prepare is only supported for k8s deployments")
"prepare is only supported for k8s deployments"
)
class DeployerException(Exception): class DeployerException(Exception):

View File

@ -14,14 +14,14 @@
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
from stack_orchestrator import constants from stack_orchestrator import constants
from stack_orchestrator.deploy.k8s.deploy_k8s import (
K8sDeployer,
K8sDeployerConfigGenerator,
)
from stack_orchestrator.deploy.compose.deploy_docker import ( from stack_orchestrator.deploy.compose.deploy_docker import (
DockerDeployer, DockerDeployer,
DockerDeployerConfigGenerator, DockerDeployerConfigGenerator,
) )
from stack_orchestrator.deploy.k8s.deploy_k8s import (
K8sDeployer,
K8sDeployerConfigGenerator,
)
def getDeployerConfigGenerator(type: str, deployment_context): def getDeployerConfigGenerator(type: str, deployment_context):
@ -44,10 +44,7 @@ def getDeployer(
compose_project_name, compose_project_name,
compose_env_file, compose_env_file,
) )
elif ( elif type == type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type:
type == type == constants.k8s_deploy_type
or type == constants.k8s_kind_deploy_type
):
return K8sDeployer( return K8sDeployer(
type, type,
deployment_context, deployment_context,

View File

@ -13,29 +13,28 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import click
from pathlib import Path
import subprocess import subprocess
import sys import sys
from pathlib import Path
import click
from stack_orchestrator import constants from stack_orchestrator import constants
from stack_orchestrator.deploy.images import push_images_operation
from stack_orchestrator.deploy.deploy import ( from stack_orchestrator.deploy.deploy import (
up_operation, create_deploy_context,
down_operation, down_operation,
prepare_operation,
ps_operation,
port_operation,
status_operation,
)
from stack_orchestrator.deploy.deploy import (
exec_operation, exec_operation,
logs_operation, logs_operation,
create_deploy_context, port_operation,
prepare_operation,
ps_operation,
status_operation,
up_operation,
update_envs_operation, update_envs_operation,
) )
from stack_orchestrator.deploy.deploy_types import DeployCommandContext from stack_orchestrator.deploy.deploy_types import DeployCommandContext
from stack_orchestrator.deploy.deployment_context import DeploymentContext from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.deploy.images import push_images_operation
@click.group() @click.group()
@ -149,9 +148,7 @@ def prepare(ctx, skip_cluster_management):
# TODO: remove legacy up command since it's an alias for stop # TODO: remove legacy up command since it's an alias for stop
@command.command() @command.command()
@click.option( @click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
"--delete-volumes/--preserve-volumes", default=False, help="delete data volumes"
)
@click.option( @click.option(
"--skip-cluster-management/--perform-cluster-management", "--skip-cluster-management/--perform-cluster-management",
default=True, default=True,
@ -168,9 +165,7 @@ def down(ctx, delete_volumes, skip_cluster_management, extra_args):
# stop is the preferred alias for down # stop is the preferred alias for down
@command.command() @command.command()
@click.option( @click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
"--delete-volumes/--preserve-volumes", default=False, help="delete data volumes"
)
@click.option( @click.option(
"--skip-cluster-management/--perform-cluster-management", "--skip-cluster-management/--perform-cluster-management",
default=True, default=True,
@ -256,9 +251,7 @@ def run_job(ctx, job_name, helm_release):
@command.command() @command.command()
@click.option("--stack-path", help="Path to stack git repo (overrides stored path)") @click.option("--stack-path", help="Path to stack git repo (overrides stored path)")
@click.option( @click.option("--spec-file", help="Path to GitOps spec.yml in repo (e.g., deployment/spec.yml)")
"--spec-file", help="Path to GitOps spec.yml in repo (e.g., deployment/spec.yml)"
)
@click.option("--config-file", help="Config file to pass to deploy init") @click.option("--config-file", help="Config file to pass to deploy init")
@click.option( @click.option(
"--force", "--force",
@ -292,33 +285,27 @@ def restart(ctx, stack_path, spec_file, config_file, force, expected_ip):
commands.py on each restart. Use 'deploy init' only for initial commands.py on each restart. Use 'deploy init' only for initial
spec generation, then customize and commit to your operator repo. spec generation, then customize and commit to your operator repo.
""" """
from stack_orchestrator.util import get_yaml, get_parsed_deployment_spec
from stack_orchestrator.deploy.deployment_create import create_operation from stack_orchestrator.deploy.deployment_create import create_operation
from stack_orchestrator.deploy.dns_probe import verify_dns_via_probe from stack_orchestrator.deploy.dns_probe import verify_dns_via_probe
from stack_orchestrator.util import get_parsed_deployment_spec, get_yaml
deployment_context: DeploymentContext = ctx.obj deployment_context: DeploymentContext = ctx.obj
# Get current spec info (before git pull) # Get current spec info (before git pull)
current_spec = deployment_context.spec current_spec = deployment_context.spec
current_http_proxy = current_spec.get_http_proxy() current_http_proxy = current_spec.get_http_proxy()
current_hostname = ( current_hostname = current_http_proxy[0]["host-name"] if current_http_proxy else None
current_http_proxy[0]["host-name"] if current_http_proxy else None
)
# Resolve stack source path # Resolve stack source path
if stack_path: if stack_path:
stack_source = Path(stack_path).resolve() stack_source = Path(stack_path).resolve()
else: else:
# Try to get from deployment.yml # Try to get from deployment.yml
deployment_file = ( deployment_file = deployment_context.deployment_dir / constants.deployment_file_name
deployment_context.deployment_dir / constants.deployment_file_name
)
deployment_data = get_yaml().load(open(deployment_file)) deployment_data = get_yaml().load(open(deployment_file))
stack_source_str = deployment_data.get("stack-source") stack_source_str = deployment_data.get("stack-source")
if not stack_source_str: if not stack_source_str:
print( print("Error: No stack-source in deployment.yml and --stack-path not provided")
"Error: No stack-source in deployment.yml and --stack-path not provided"
)
print("Use --stack-path to specify the stack git repository location") print("Use --stack-path to specify the stack git repository location")
sys.exit(1) sys.exit(1)
stack_source = Path(stack_source_str) stack_source = Path(stack_source_str)
@ -334,9 +321,7 @@ def restart(ctx, stack_path, spec_file, config_file, force, expected_ip):
# Step 1: Git pull (brings in updated spec.yml from operator's repo) # Step 1: Git pull (brings in updated spec.yml from operator's repo)
print("\n[1/4] Pulling latest code from stack repository...") print("\n[1/4] Pulling latest code from stack repository...")
git_result = subprocess.run( git_result = subprocess.run(["git", "pull"], cwd=stack_source, capture_output=True, text=True)
["git", "pull"], cwd=stack_source, capture_output=True, text=True
)
if git_result.returncode != 0: if git_result.returncode != 0:
print(f"Git pull failed: {git_result.stderr}") print(f"Git pull failed: {git_result.stderr}")
sys.exit(1) sys.exit(1)
@ -408,17 +393,13 @@ def restart(ctx, stack_path, spec_file, config_file, force, expected_ip):
# Stop deployment # Stop deployment
print("\n[4/4] Restarting deployment...") print("\n[4/4] Restarting deployment...")
ctx.obj = make_deploy_context(ctx) ctx.obj = make_deploy_context(ctx)
down_operation( down_operation(ctx, delete_volumes=False, extra_args_list=[], skip_cluster_management=True)
ctx, delete_volumes=False, extra_args_list=[], skip_cluster_management=True
)
# Namespace deletion wait is handled by _ensure_namespace() in # Namespace deletion wait is handled by _ensure_namespace() in
# the deployer — no fixed sleep needed here. # the deployer — no fixed sleep needed here.
# Start deployment # Start deployment
up_operation( up_operation(ctx, services_list=None, stay_attached=False, skip_cluster_management=True)
ctx, services_list=None, stay_attached=False, skip_cluster_management=True
)
print("\n=== Restart Complete ===") print("\n=== Restart Complete ===")
print("Deployment restarted with git-tracked configuration.") print("Deployment restarted with git-tracked configuration.")

View File

@ -18,9 +18,9 @@ import os
from pathlib import Path from pathlib import Path
from stack_orchestrator import constants from stack_orchestrator import constants
from stack_orchestrator.util import get_yaml
from stack_orchestrator.deploy.stack import Stack
from stack_orchestrator.deploy.spec import Spec from stack_orchestrator.deploy.spec import Spec
from stack_orchestrator.deploy.stack import Stack
from stack_orchestrator.util import get_yaml
class DeploymentContext: class DeploymentContext:
@ -58,7 +58,7 @@ class DeploymentContext:
self.stack.init_from_file(self.get_stack_file()) self.stack.init_from_file(self.get_stack_file())
deployment_file_path = self.get_deployment_file() deployment_file_path = self.get_deployment_file()
if deployment_file_path.exists(): if deployment_file_path.exists():
obj = get_yaml().load(open(deployment_file_path, "r")) obj = get_yaml().load(open(deployment_file_path))
self.id = obj[constants.cluster_id_key] self.id = obj[constants.cluster_id_key]
# Handle the case of a legacy deployment with no file # Handle the case of a legacy deployment with no file
# Code below is intended to match the output from _make_default_cluster_name() # Code below is intended to match the output from _make_default_cluster_name()
@ -75,7 +75,7 @@ class DeploymentContext:
raise ValueError(f"File is not inside deployment directory: {file_path}") raise ValueError(f"File is not inside deployment directory: {file_path}")
yaml = get_yaml() yaml = get_yaml()
with open(file_path, "r") as f: with open(file_path) as f:
yaml_data = yaml.load(f) yaml_data = yaml.load(f)
modifier_func(yaml_data) modifier_func(yaml_data)

View File

@ -13,44 +13,44 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import click import base64
from importlib import util import filecmp
import json import json
import os import os
import re
import base64
from pathlib import Path
from typing import List, Optional
import random import random
from shutil import copy, copyfile, copytree, rmtree import re
from secrets import token_hex
import sys import sys
import filecmp
import tempfile import tempfile
from importlib import util
from pathlib import Path
from secrets import token_hex
from shutil import copy, copyfile, copytree, rmtree
import click
from stack_orchestrator import constants from stack_orchestrator import constants
from stack_orchestrator.opts import opts
from stack_orchestrator.util import (
get_stack_path,
get_parsed_deployment_spec,
get_parsed_stack_config,
global_options,
get_yaml,
get_pod_list,
get_pod_file_path,
pod_has_scripts,
get_pod_script_paths,
get_plugin_code_paths,
error_exit,
env_var_map_from_file,
resolve_config_dir,
get_job_list,
get_job_file_path,
)
from stack_orchestrator.deploy.spec import Spec
from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
from stack_orchestrator.deploy.deployment_context import DeploymentContext from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.deploy.spec import Spec
from stack_orchestrator.opts import opts
from stack_orchestrator.util import (
env_var_map_from_file,
error_exit,
get_job_file_path,
get_job_list,
get_parsed_deployment_spec,
get_parsed_stack_config,
get_plugin_code_paths,
get_pod_file_path,
get_pod_list,
get_pod_script_paths,
get_stack_path,
get_yaml,
global_options,
pod_has_scripts,
resolve_config_dir,
)
def _make_default_deployment_dir(): def _make_default_deployment_dir():
@ -66,7 +66,7 @@ def _get_ports(stack):
pod_file_path = get_pod_file_path(stack, parsed_stack, pod) pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
if pod_file_path is None: if pod_file_path is None:
continue continue
parsed_pod_file = yaml.load(open(pod_file_path, "r")) parsed_pod_file = yaml.load(open(pod_file_path))
if "services" in parsed_pod_file: if "services" in parsed_pod_file:
for svc_name, svc in parsed_pod_file["services"].items(): for svc_name, svc in parsed_pod_file["services"].items():
if "ports" in svc: if "ports" in svc:
@ -102,7 +102,7 @@ def _get_named_volumes(stack):
pod_file_path = get_pod_file_path(stack, parsed_stack, pod) pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
if pod_file_path is None: if pod_file_path is None:
continue continue
parsed_pod_file = yaml.load(open(pod_file_path, "r")) parsed_pod_file = yaml.load(open(pod_file_path))
if "volumes" in parsed_pod_file: if "volumes" in parsed_pod_file:
volumes = parsed_pod_file["volumes"] volumes = parsed_pod_file["volumes"]
for volume in volumes.keys(): for volume in volumes.keys():
@ -132,9 +132,7 @@ def _create_bind_dir_if_relative(volume, path_string, compose_dir):
absolute_path.mkdir(parents=True, exist_ok=True) absolute_path.mkdir(parents=True, exist_ok=True)
else: else:
if not path.exists(): if not path.exists():
print( print(f"WARNING: mount path for volume {volume} does not exist: {path_string}")
f"WARNING: mount path for volume {volume} does not exist: {path_string}"
)
# See: # See:
@ -151,9 +149,7 @@ def _fixup_pod_file(pod, spec, compose_dir):
volume_spec = spec_volumes[volume] volume_spec = spec_volumes[volume]
if volume_spec: if volume_spec:
volume_spec_fixedup = ( volume_spec_fixedup = (
volume_spec volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
if Path(volume_spec).is_absolute()
else f".{volume_spec}"
) )
_create_bind_dir_if_relative(volume, volume_spec, compose_dir) _create_bind_dir_if_relative(volume, volume_spec, compose_dir)
# this is Docker specific # this is Docker specific
@ -328,10 +324,7 @@ def _get_mapped_ports(stack: str, map_recipe: str):
else: else:
print("Error: bad map_recipe") print("Error: bad map_recipe")
else: else:
print( print(f"Error: --map-ports-to-host must specify one of: " f"{port_map_recipes}")
f"Error: --map-ports-to-host must specify one of: "
f"{port_map_recipes}"
)
sys.exit(1) sys.exit(1)
return ports return ports
@ -356,9 +349,7 @@ def _parse_config_variables(variable_values: str):
@click.command() @click.command()
@click.option("--config", help="Provide config variables for the deployment") @click.option("--config", help="Provide config variables for the deployment")
@click.option( @click.option("--config-file", help="Provide config variables in a file for the deployment")
"--config-file", help="Provide config variables in a file for the deployment"
)
@click.option("--kube-config", help="Provide a config file for a k8s deployment") @click.option("--kube-config", help="Provide a config file for a k8s deployment")
@click.option( @click.option(
"--image-registry", "--image-registry",
@ -372,9 +363,7 @@ def _parse_config_variables(variable_values: str):
"localhost-same, any-same, localhost-fixed-random, any-fixed-random", "localhost-same, any-same, localhost-fixed-random, any-fixed-random",
) )
@click.pass_context @click.pass_context
def init( def init(ctx, config, config_file, kube_config, image_registry, output, map_ports_to_host):
ctx, config, config_file, kube_config, image_registry, output, map_ports_to_host
):
stack = global_options(ctx).stack stack = global_options(ctx).stack
deployer_type = ctx.obj.deployer.type deployer_type = ctx.obj.deployer.type
deploy_command_context = ctx.obj deploy_command_context = ctx.obj
@ -421,13 +410,9 @@ def init_operation(
else: else:
# Check for --kube-config supplied for non-relevant deployer types # Check for --kube-config supplied for non-relevant deployer types
if kube_config is not None: if kube_config is not None:
error_exit( error_exit(f"--kube-config is not allowed with a {deployer_type} deployment")
f"--kube-config is not allowed with a {deployer_type} deployment"
)
if image_registry is not None: if image_registry is not None:
error_exit( error_exit(f"--image-registry is not allowed with a {deployer_type} deployment")
f"--image-registry is not allowed with a {deployer_type} deployment"
)
if default_spec_file_content: if default_spec_file_content:
spec_file_content.update(default_spec_file_content) spec_file_content.update(default_spec_file_content)
config_variables = _parse_config_variables(config) config_variables = _parse_config_variables(config)
@ -479,9 +464,7 @@ def init_operation(
spec_file_content["configmaps"] = configmap_descriptors spec_file_content["configmaps"] = configmap_descriptors
if opts.o.debug: if opts.o.debug:
print( print(f"Creating spec file for stack: {stack} with content: {spec_file_content}")
f"Creating spec file for stack: {stack} with content: {spec_file_content}"
)
with open(output, "w") as output_file: with open(output, "w") as output_file:
get_yaml().dump(spec_file_content, output_file) get_yaml().dump(spec_file_content, output_file)
@ -497,7 +480,8 @@ def _generate_and_store_secrets(config_vars: dict, deployment_name: str):
Called by `deploy create` - generates fresh secrets and stores them. Called by `deploy create` - generates fresh secrets and stores them.
Returns the generated secrets dict for reference. Returns the generated secrets dict for reference.
""" """
from kubernetes import client, config as k8s_config from kubernetes import client
from kubernetes import config as k8s_config
secrets = {} secrets = {}
for name, value in config_vars.items(): for name, value in config_vars.items():
@ -526,9 +510,7 @@ def _generate_and_store_secrets(config_vars: dict, deployment_name: str):
try: try:
k8s_config.load_incluster_config() k8s_config.load_incluster_config()
except Exception: except Exception:
print( print("Warning: Could not load kube config, secrets will not be stored in K8s")
"Warning: Could not load kube config, secrets will not be stored in K8s"
)
return secrets return secrets
v1 = client.CoreV1Api() v1 = client.CoreV1Api()
@ -555,7 +537,7 @@ def _generate_and_store_secrets(config_vars: dict, deployment_name: str):
return secrets return secrets
def create_registry_secret(spec: Spec, deployment_name: str) -> Optional[str]: def create_registry_secret(spec: Spec, deployment_name: str) -> str | None:
"""Create K8s docker-registry secret from spec + environment. """Create K8s docker-registry secret from spec + environment.
Reads registry configuration from spec.yml and creates a Kubernetes Reads registry configuration from spec.yml and creates a Kubernetes
@ -568,7 +550,8 @@ def create_registry_secret(spec: Spec, deployment_name: str) -> Optional[str]:
Returns: Returns:
The secret name if created, None if no registry config The secret name if created, None if no registry config
""" """
from kubernetes import client, config as k8s_config from kubernetes import client
from kubernetes import config as k8s_config
registry_config = spec.get_image_registry_config() registry_config = spec.get_image_registry_config()
if not registry_config: if not registry_config:
@ -585,17 +568,12 @@ def create_registry_secret(spec: Spec, deployment_name: str) -> Optional[str]:
assert token_env is not None assert token_env is not None
token = os.environ.get(token_env) token = os.environ.get(token_env)
if not token: if not token:
print( print(f"Warning: Registry token env var '{token_env}' not set, " "skipping registry secret")
f"Warning: Registry token env var '{token_env}' not set, "
"skipping registry secret"
)
return None return None
# Create dockerconfigjson format (Docker API uses "password" field for tokens) # Create dockerconfigjson format (Docker API uses "password" field for tokens)
auth = base64.b64encode(f"{username}:{token}".encode()).decode() auth = base64.b64encode(f"{username}:{token}".encode()).decode()
docker_config = { docker_config = {"auths": {server: {"username": username, "password": token, "auth": auth}}}
"auths": {server: {"username": username, "password": token, "auth": auth}}
}
# Secret name derived from deployment name # Secret name derived from deployment name
secret_name = f"{deployment_name}-registry" secret_name = f"{deployment_name}-registry"
@ -615,11 +593,7 @@ def create_registry_secret(spec: Spec, deployment_name: str) -> Optional[str]:
k8s_secret = client.V1Secret( k8s_secret = client.V1Secret(
metadata=client.V1ObjectMeta(name=secret_name), metadata=client.V1ObjectMeta(name=secret_name),
data={ data={".dockerconfigjson": base64.b64encode(json.dumps(docker_config).encode()).decode()},
".dockerconfigjson": base64.b64encode(
json.dumps(docker_config).encode()
).decode()
},
type="kubernetes.io/dockerconfigjson", type="kubernetes.io/dockerconfigjson",
) )
@ -636,17 +610,14 @@ def create_registry_secret(spec: Spec, deployment_name: str) -> Optional[str]:
return secret_name return secret_name
def _write_config_file( def _write_config_file(spec_file: Path, config_env_file: Path, deployment_name: str | None = None):
spec_file: Path, config_env_file: Path, deployment_name: Optional[str] = None
):
spec_content = get_parsed_deployment_spec(spec_file) spec_content = get_parsed_deployment_spec(spec_file)
config_vars = spec_content.get("config", {}) or {} config_vars = spec_content.get("config", {}) or {}
# Generate and store secrets in K8s if deployment_name provided and tokens exist # Generate and store secrets in K8s if deployment_name provided and tokens exist
if deployment_name and config_vars: if deployment_name and config_vars:
has_generate_tokens = any( has_generate_tokens = any(
isinstance(v, str) and GENERATE_TOKEN_PATTERN.search(v) isinstance(v, str) and GENERATE_TOKEN_PATTERN.search(v) for v in config_vars.values()
for v in config_vars.values()
) )
if has_generate_tokens: if has_generate_tokens:
_generate_and_store_secrets(config_vars, deployment_name) _generate_and_store_secrets(config_vars, deployment_name)
@ -669,13 +640,13 @@ def _write_kube_config_file(external_path: Path, internal_path: Path):
copyfile(external_path, internal_path) copyfile(external_path, internal_path)
def _copy_files_to_directory(file_paths: List[Path], directory: Path): def _copy_files_to_directory(file_paths: list[Path], directory: Path):
for path in file_paths: for path in file_paths:
# Using copy to preserve the execute bit # Using copy to preserve the execute bit
copy(path, os.path.join(directory, os.path.basename(path))) copy(path, os.path.join(directory, os.path.basename(path)))
def _create_deployment_file(deployment_dir: Path, stack_source: Optional[Path] = None): def _create_deployment_file(deployment_dir: Path, stack_source: Path | None = None):
deployment_file_path = deployment_dir.joinpath(constants.deployment_file_name) deployment_file_path = deployment_dir.joinpath(constants.deployment_file_name)
cluster = f"{constants.cluster_name_prefix}{token_hex(8)}" cluster = f"{constants.cluster_name_prefix}{token_hex(8)}"
deployment_content = {constants.cluster_id_key: cluster} deployment_content = {constants.cluster_id_key: cluster}
@ -701,9 +672,7 @@ def _check_volume_definitions(spec):
@click.command() @click.command()
@click.option( @click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
"--spec-file", required=True, help="Spec file to use to create this deployment"
)
@click.option("--deployment-dir", help="Create deployment files in this directory") @click.option("--deployment-dir", help="Create deployment files in this directory")
@click.option( @click.option(
"--update", "--update",
@ -757,9 +726,7 @@ def create_operation(
initial_peers=None, initial_peers=None,
extra_args=(), extra_args=(),
): ):
parsed_spec = Spec( parsed_spec = Spec(os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file))
os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file)
)
_check_volume_definitions(parsed_spec) _check_volume_definitions(parsed_spec)
stack_name = parsed_spec["stack"] stack_name = parsed_spec["stack"]
deployment_type = parsed_spec[constants.deploy_to_key] deployment_type = parsed_spec[constants.deploy_to_key]
@ -816,9 +783,7 @@ def create_operation(
# Exclude config file to preserve deployment settings # Exclude config file to preserve deployment settings
# (XXX breaks passing config vars from spec) # (XXX breaks passing config vars from spec)
exclude_patterns = ["data", "data/*", constants.config_file_name] exclude_patterns = ["data", "data/*", constants.config_file_name]
_safe_copy_tree( _safe_copy_tree(temp_dir, deployment_dir_path, exclude_patterns=exclude_patterns)
temp_dir, deployment_dir_path, exclude_patterns=exclude_patterns
)
finally: finally:
# Clean up temp dir # Clean up temp dir
rmtree(temp_dir) rmtree(temp_dir)
@ -841,18 +806,14 @@ def create_operation(
deployment_context = DeploymentContext() deployment_context = DeploymentContext()
deployment_context.init(deployment_dir_path) deployment_context.init(deployment_dir_path)
# Call the deployer to generate any deployer-specific files (e.g. for kind) # Call the deployer to generate any deployer-specific files (e.g. for kind)
deployer_config_generator = getDeployerConfigGenerator( deployer_config_generator = getDeployerConfigGenerator(deployment_type, deployment_context)
deployment_type, deployment_context
)
# TODO: make deployment_dir_path a Path above # TODO: make deployment_dir_path a Path above
if deployer_config_generator is not None: if deployer_config_generator is not None:
deployer_config_generator.generate(deployment_dir_path) deployer_config_generator.generate(deployment_dir_path)
call_stack_deploy_create( call_stack_deploy_create(deployment_context, [network_dir, initial_peers, *extra_args])
deployment_context, [network_dir, initial_peers, *extra_args]
)
def _safe_copy_tree(src: Path, dst: Path, exclude_patterns: Optional[List[str]] = None): def _safe_copy_tree(src: Path, dst: Path, exclude_patterns: list[str] | None = None):
""" """
Recursively copy a directory tree, backing up changed files with .bak suffix. Recursively copy a directory tree, backing up changed files with .bak suffix.
@ -873,11 +834,7 @@ def _safe_copy_tree(src: Path, dst: Path, exclude_patterns: Optional[List[str]]
def safe_copy_file(src_file: Path, dst_file: Path): def safe_copy_file(src_file: Path, dst_file: Path):
"""Copy file, backing up destination if it differs.""" """Copy file, backing up destination if it differs."""
if ( if dst_file.exists() and not dst_file.is_dir() and not filecmp.cmp(src_file, dst_file):
dst_file.exists()
and not dst_file.is_dir()
and not filecmp.cmp(src_file, dst_file)
):
os.rename(dst_file, f"{dst_file}.bak") os.rename(dst_file, f"{dst_file}.bak")
copy(src_file, dst_file) copy(src_file, dst_file)
@ -903,7 +860,7 @@ def _write_deployment_files(
stack_name: str, stack_name: str,
deployment_type: str, deployment_type: str,
include_deployment_file: bool = True, include_deployment_file: bool = True,
stack_source: Optional[Path] = None, stack_source: Path | None = None,
): ):
""" """
Write deployment files to target directory. Write deployment files to target directory.
@ -931,9 +888,7 @@ def _write_deployment_files(
# Use stack_name as deployment_name for K8s secret naming # Use stack_name as deployment_name for K8s secret naming
# Extract just the name part if stack_name is a path ("path/to/stack" -> "stack") # Extract just the name part if stack_name is a path ("path/to/stack" -> "stack")
deployment_name = Path(stack_name).name.replace("_", "-") deployment_name = Path(stack_name).name.replace("_", "-")
_write_config_file( _write_config_file(spec_file, target_dir.joinpath(constants.config_file_name), deployment_name)
spec_file, target_dir.joinpath(constants.config_file_name), deployment_name
)
# Copy any k8s config file into the target dir # Copy any k8s config file into the target dir
if deployment_type == "k8s": if deployment_type == "k8s":
@ -954,7 +909,7 @@ def _write_deployment_files(
pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod) pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
if pod_file_path is None: if pod_file_path is None:
continue continue
parsed_pod_file = yaml.load(open(pod_file_path, "r")) parsed_pod_file = yaml.load(open(pod_file_path))
extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod) extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
destination_pod_dir = destination_pods_dir.joinpath(pod) destination_pod_dir = destination_pods_dir.joinpath(pod)
os.makedirs(destination_pod_dir, exist_ok=True) os.makedirs(destination_pod_dir, exist_ok=True)
@ -962,7 +917,7 @@ def _write_deployment_files(
print(f"extra config dirs: {extra_config_dirs}") print(f"extra config dirs: {extra_config_dirs}")
_fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir) _fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
with open( with open(
destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w" destination_compose_dir.joinpath(f"docker-compose-{pod}.yml"), "w"
) as output_file: ) as output_file:
yaml.dump(parsed_pod_file, output_file) yaml.dump(parsed_pod_file, output_file)
@ -986,12 +941,8 @@ def _write_deployment_files(
for configmap in parsed_spec.get_configmaps(): for configmap in parsed_spec.get_configmaps():
source_config_dir = resolve_config_dir(stack_name, configmap) source_config_dir = resolve_config_dir(stack_name, configmap)
if os.path.exists(source_config_dir): if os.path.exists(source_config_dir):
destination_config_dir = target_dir.joinpath( destination_config_dir = target_dir.joinpath("configmaps", configmap)
"configmaps", configmap copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
)
copytree(
source_config_dir, destination_config_dir, dirs_exist_ok=True
)
else: else:
# TODO: # TODO:
# This is odd - looks up config dir that matches a volume name, # This is odd - looks up config dir that matches a volume name,
@ -1022,12 +973,10 @@ def _write_deployment_files(
for job in jobs: for job in jobs:
job_file_path = get_job_file_path(stack_name, parsed_stack, job) job_file_path = get_job_file_path(stack_name, parsed_stack, job)
if job_file_path and job_file_path.exists(): if job_file_path and job_file_path.exists():
parsed_job_file = yaml.load(open(job_file_path, "r")) parsed_job_file = yaml.load(open(job_file_path))
_fixup_pod_file(parsed_job_file, parsed_spec, destination_compose_dir) _fixup_pod_file(parsed_job_file, parsed_spec, destination_compose_dir)
with open( with open(
destination_compose_jobs_dir.joinpath( destination_compose_jobs_dir.joinpath(f"docker-compose-{job}.yml"),
"docker-compose-%s.yml" % job
),
"w", "w",
) as output_file: ) as output_file:
yaml.dump(parsed_job_file, output_file) yaml.dump(parsed_job_file, output_file)
@ -1042,18 +991,14 @@ def _write_deployment_files(
@click.option("--node-moniker", help="Moniker for this node") @click.option("--node-moniker", help="Moniker for this node")
@click.option("--chain-id", help="The new chain id") @click.option("--chain-id", help="The new chain id")
@click.option("--key-name", help="Name for new node key") @click.option("--key-name", help="Name for new node key")
@click.option( @click.option("--gentx-files", help="List of comma-delimited gentx filenames from other nodes")
"--gentx-files", help="List of comma-delimited gentx filenames from other nodes"
)
@click.option( @click.option(
"--gentx-addresses", "--gentx-addresses",
type=str, type=str,
help="List of comma-delimited validator addresses for other nodes", help="List of comma-delimited validator addresses for other nodes",
) )
@click.option("--genesis-file", help="Genesis file for the network") @click.option("--genesis-file", help="Genesis file for the network")
@click.option( @click.option("--initialize-network", is_flag=True, default=False, help="Initialize phase")
"--initialize-network", is_flag=True, default=False, help="Initialize phase"
)
@click.option("--join-network", is_flag=True, default=False, help="Join phase") @click.option("--join-network", is_flag=True, default=False, help="Join phase")
@click.option("--connect-network", is_flag=True, default=False, help="Connect phase") @click.option("--connect-network", is_flag=True, default=False, help="Connect phase")
@click.option("--create-network", is_flag=True, default=False, help="Create phase") @click.option("--create-network", is_flag=True, default=False, help="Create phase")

View File

@ -6,7 +6,7 @@
import secrets import secrets
import socket import socket
import time import time
from typing import Optional
import requests import requests
from kubernetes import client from kubernetes import client
@ -15,7 +15,8 @@ def get_server_egress_ip() -> str:
"""Get this server's public egress IP via ipify.""" """Get this server's public egress IP via ipify."""
response = requests.get("https://api.ipify.org", timeout=10) response = requests.get("https://api.ipify.org", timeout=10)
response.raise_for_status() response.raise_for_status()
return response.text.strip() result: str = response.text.strip()
return result
def resolve_hostname(hostname: str) -> list[str]: def resolve_hostname(hostname: str) -> list[str]:
@ -27,7 +28,7 @@ def resolve_hostname(hostname: str) -> list[str]:
return [] return []
def verify_dns_simple(hostname: str, expected_ip: Optional[str] = None) -> bool: def verify_dns_simple(hostname: str, expected_ip: str | None = None) -> bool:
"""Simple DNS verification - check hostname resolves to expected IP. """Simple DNS verification - check hostname resolves to expected IP.
If expected_ip not provided, uses server's egress IP. If expected_ip not provided, uses server's egress IP.
@ -98,9 +99,7 @@ def delete_probe_ingress(namespace: str = "default"):
"""Delete the temporary probe ingress.""" """Delete the temporary probe ingress."""
networking_api = client.NetworkingV1Api() networking_api = client.NetworkingV1Api()
try: try:
networking_api.delete_namespaced_ingress( networking_api.delete_namespaced_ingress(name="laconic-dns-probe", namespace=namespace)
name="laconic-dns-probe", namespace=namespace
)
except client.exceptions.ApiException: except client.exceptions.ApiException:
pass # Ignore if already deleted pass # Ignore if already deleted

View File

@ -13,15 +13,14 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
from typing import Set
from python_on_whales import DockerClient from python_on_whales import DockerClient
from stack_orchestrator import constants from stack_orchestrator import constants
from stack_orchestrator.opts import opts
from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.deploy.deploy_types import DeployCommandContext from stack_orchestrator.deploy.deploy_types import DeployCommandContext
from stack_orchestrator.deploy.deploy_util import images_for_deployment from stack_orchestrator.deploy.deploy_util import images_for_deployment
from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.opts import opts
def _image_needs_pushed(image: str): def _image_needs_pushed(image: str):
@ -32,9 +31,7 @@ def _image_needs_pushed(image: str):
def _remote_tag_for_image(image: str, remote_repo_url: str): def _remote_tag_for_image(image: str, remote_repo_url: str):
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
major_parts = image.split("/", 2) major_parts = image.split("/", 2)
image_name_with_version = ( image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
major_parts[1] if 2 == len(major_parts) else major_parts[0]
)
(image_name, image_version) = image_name_with_version.split(":") (image_name, image_version) = image_name_with_version.split(":")
if image_version == "local": if image_version == "local":
return f"{remote_repo_url}/{image_name}:deploy" return f"{remote_repo_url}/{image_name}:deploy"
@ -63,18 +60,14 @@ def add_tags_to_image(remote_repo_url: str, local_tag: str, *additional_tags):
docker = DockerClient() docker = DockerClient()
remote_tag = _remote_tag_for_image(local_tag, remote_repo_url) remote_tag = _remote_tag_for_image(local_tag, remote_repo_url)
new_remote_tags = [ new_remote_tags = [_remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags]
_remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags
]
docker.buildx.imagetools.create(sources=[remote_tag], tags=new_remote_tags) docker.buildx.imagetools.create(sources=[remote_tag], tags=new_remote_tags)
def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: str): def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: str):
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
major_parts = image.split("/", 2) major_parts = image.split("/", 2)
image_name_with_version = ( image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
major_parts[1] if 2 == len(major_parts) else major_parts[0]
)
(image_name, image_version) = image_name_with_version.split(":") (image_name, image_version) = image_name_with_version.split(":")
if image_version == "local": if image_version == "local":
# Salt the tag with part of the deployment id to make it unique to this # Salt the tag with part of the deployment id to make it unique to this
@ -91,24 +84,20 @@ def push_images_operation(
): ):
# Get the list of images for the stack # Get the list of images for the stack
cluster_context = command_context.cluster_context cluster_context = command_context.cluster_context
images: Set[str] = images_for_deployment(cluster_context.compose_files) images: set[str] = images_for_deployment(cluster_context.compose_files)
# Tag the images for the remote repo # Tag the images for the remote repo
remote_repo_url = deployment_context.spec.obj[constants.image_registry_key] remote_repo_url = deployment_context.spec.obj[constants.image_registry_key]
docker = DockerClient() docker = DockerClient()
for image in images: for image in images:
if _image_needs_pushed(image): if _image_needs_pushed(image):
remote_tag = remote_tag_for_image_unique( remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id)
image, remote_repo_url, deployment_context.id
)
if opts.o.verbose: if opts.o.verbose:
print(f"Tagging {image} to {remote_tag}") print(f"Tagging {image} to {remote_tag}")
docker.image.tag(image, remote_tag) docker.image.tag(image, remote_tag)
# Run docker push commands to upload # Run docker push commands to upload
for image in images: for image in images:
if _image_needs_pushed(image): if _image_needs_pushed(image):
remote_tag = remote_tag_for_image_unique( remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id)
image, remote_repo_url, deployment_context.id
)
if opts.o.verbose: if opts.o.verbose:
print(f"Pushing image {remote_tag}") print(f"Pushing image {remote_tag}")
docker.image.push(remote_tag) docker.image.push(remote_tag)

View File

@ -13,33 +13,31 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import os
import base64 import base64
import os
from typing import Any
from kubernetes import client from kubernetes import client
from typing import Any, List, Optional, Set
from stack_orchestrator.opts import opts from stack_orchestrator.deploy.deploy_types import DeployEnvVars
from stack_orchestrator.util import env_var_map_from_file from stack_orchestrator.deploy.deploy_util import (
images_for_deployment,
parsed_pod_files_map_from_file_names,
)
from stack_orchestrator.deploy.images import remote_tag_for_image_unique
from stack_orchestrator.deploy.k8s.helpers import ( from stack_orchestrator.deploy.k8s.helpers import (
envs_from_compose_file,
envs_from_environment_variables_map,
get_kind_pv_bind_mount_path,
merge_envs,
named_volumes_from_pod_files, named_volumes_from_pod_files,
translate_sidecar_service_names,
volume_mounts_for_service, volume_mounts_for_service,
volumes_for_pod_files, volumes_for_pod_files,
) )
from stack_orchestrator.deploy.k8s.helpers import get_kind_pv_bind_mount_path from stack_orchestrator.deploy.spec import ResourceLimits, Resources, Spec
from stack_orchestrator.deploy.k8s.helpers import ( from stack_orchestrator.opts import opts
envs_from_environment_variables_map, from stack_orchestrator.util import env_var_map_from_file
envs_from_compose_file,
merge_envs,
translate_sidecar_service_names,
)
from stack_orchestrator.deploy.deploy_util import (
parsed_pod_files_map_from_file_names,
images_for_deployment,
)
from stack_orchestrator.deploy.deploy_types import DeployEnvVars
from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits
from stack_orchestrator.deploy.images import remote_tag_for_image_unique
DEFAULT_VOLUME_RESOURCES = Resources({"reservations": {"storage": "2Gi"}}) DEFAULT_VOLUME_RESOURCES = Resources({"reservations": {"storage": "2Gi"}})
@ -52,7 +50,7 @@ DEFAULT_CONTAINER_RESOURCES = Resources(
def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements: def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements:
def to_dict(limits: Optional[ResourceLimits]): def to_dict(limits: ResourceLimits | None):
if not limits: if not limits:
return None return None
@ -72,7 +70,7 @@ def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequi
class ClusterInfo: class ClusterInfo:
parsed_pod_yaml_map: Any parsed_pod_yaml_map: Any
image_set: Set[str] = set() image_set: set[str] = set()
app_name: str app_name: str
environment_variables: DeployEnvVars environment_variables: DeployEnvVars
spec: Spec spec: Spec
@ -80,14 +78,12 @@ class ClusterInfo:
def __init__(self) -> None: def __init__(self) -> None:
pass pass
def int(self, pod_files: List[str], compose_env_file, deployment_name, spec: Spec): def int(self, pod_files: list[str], compose_env_file, deployment_name, spec: Spec):
self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files) self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
# Find the set of images in the pods # Find the set of images in the pods
self.image_set = images_for_deployment(pod_files) self.image_set = images_for_deployment(pod_files)
# Filter out None values from env file # Filter out None values from env file
env_vars = { env_vars = {k: v for k, v in env_var_map_from_file(compose_env_file).items() if v}
k: v for k, v in env_var_map_from_file(compose_env_file).items() if v
}
self.environment_variables = DeployEnvVars(env_vars) self.environment_variables = DeployEnvVars(env_vars)
self.app_name = deployment_name self.app_name = deployment_name
self.spec = spec self.spec = spec
@ -124,8 +120,7 @@ class ClusterInfo:
service = client.V1Service( service = client.V1Service(
metadata=client.V1ObjectMeta( metadata=client.V1ObjectMeta(
name=( name=(
f"{self.app_name}-nodeport-" f"{self.app_name}-nodeport-" f"{pod_port}-{protocol.lower()}"
f"{pod_port}-{protocol.lower()}"
), ),
labels={"app": self.app_name}, labels={"app": self.app_name},
), ),
@ -145,9 +140,7 @@ class ClusterInfo:
nodeports.append(service) nodeports.append(service)
return nodeports return nodeports
def get_ingress( def get_ingress(self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"):
self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"
):
# No ingress for a deployment that has no http-proxy defined, for now # No ingress for a deployment that has no http-proxy defined, for now
http_proxy_info_list = self.spec.get_http_proxy() http_proxy_info_list = self.spec.get_http_proxy()
ingress = None ingress = None
@ -162,9 +155,7 @@ class ClusterInfo:
tls = ( tls = (
[ [
client.V1IngressTLS( client.V1IngressTLS(
hosts=certificate["spec"]["dnsNames"] hosts=certificate["spec"]["dnsNames"] if certificate else [host_name],
if certificate
else [host_name],
secret_name=certificate["spec"]["secretName"] secret_name=certificate["spec"]["secretName"]
if certificate if certificate
else f"{self.app_name}-tls", else f"{self.app_name}-tls",
@ -237,8 +228,7 @@ class ClusterInfo:
return None return None
service_ports = [ service_ports = [
client.V1ServicePort(port=p, target_port=p, name=f"port-{p}") client.V1ServicePort(port=p, target_port=p, name=f"port-{p}") for p in sorted(ports_set)
for p in sorted(ports_set)
] ]
service = client.V1Service( service = client.V1Service(
@ -290,9 +280,7 @@ class ClusterInfo:
volume_name=k8s_volume_name, volume_name=k8s_volume_name,
) )
pvc = client.V1PersistentVolumeClaim( pvc = client.V1PersistentVolumeClaim(
metadata=client.V1ObjectMeta( metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}", labels=labels),
name=f"{self.app_name}-{volume_name}", labels=labels
),
spec=spec, spec=spec,
) )
result.append(pvc) result.append(pvc)
@ -309,9 +297,7 @@ class ClusterInfo:
continue continue
if not cfg_map_path.startswith("/") and self.spec.file_path is not None: if not cfg_map_path.startswith("/") and self.spec.file_path is not None:
cfg_map_path = os.path.join( cfg_map_path = os.path.join(os.path.dirname(str(self.spec.file_path)), cfg_map_path)
os.path.dirname(str(self.spec.file_path)), cfg_map_path
)
# Read in all the files at a single-level of the directory. # Read in all the files at a single-level of the directory.
# This mimics the behavior of # This mimics the behavior of
@ -320,9 +306,7 @@ class ClusterInfo:
for f in os.listdir(cfg_map_path): for f in os.listdir(cfg_map_path):
full_path = os.path.join(cfg_map_path, f) full_path = os.path.join(cfg_map_path, f)
if os.path.isfile(full_path): if os.path.isfile(full_path):
data[f] = base64.b64encode(open(full_path, "rb").read()).decode( data[f] = base64.b64encode(open(full_path, "rb").read()).decode("ASCII")
"ASCII"
)
spec = client.V1ConfigMap( spec = client.V1ConfigMap(
metadata=client.V1ObjectMeta( metadata=client.V1ObjectMeta(
@ -425,7 +409,7 @@ class ClusterInfo:
return global_resources return global_resources
# TODO: put things like image pull policy into an object-scope struct # TODO: put things like image pull policy into an object-scope struct
def get_deployment(self, image_pull_policy: Optional[str] = None): def get_deployment(self, image_pull_policy: str | None = None):
containers = [] containers = []
services = {} services = {}
global_resources = self.spec.get_container_resources() global_resources = self.spec.get_container_resources()
@ -453,9 +437,7 @@ class ClusterInfo:
port_str = port_str.split(":")[-1] port_str = port_str.split(":")[-1]
port = int(port_str) port = int(port_str)
container_ports.append( container_ports.append(
client.V1ContainerPort( client.V1ContainerPort(container_port=port, protocol=protocol)
container_port=port, protocol=protocol
)
) )
if opts.o.debug: if opts.o.debug:
print(f"image: {image}") print(f"image: {image}")
@ -473,9 +455,7 @@ class ClusterInfo:
# Translate docker-compose service names to localhost for sidecars # Translate docker-compose service names to localhost for sidecars
# All services in the same pod share the network namespace # All services in the same pod share the network namespace
sibling_services = [s for s in services.keys() if s != service_name] sibling_services = [s for s in services.keys() if s != service_name]
merged_envs = translate_sidecar_service_names( merged_envs = translate_sidecar_service_names(merged_envs, sibling_services)
merged_envs, sibling_services
)
envs = envs_from_environment_variables_map(merged_envs) envs = envs_from_environment_variables_map(merged_envs)
if opts.o.debug: if opts.o.debug:
print(f"Merged envs: {envs}") print(f"Merged envs: {envs}")
@ -488,18 +468,14 @@ class ClusterInfo:
if self.spec.get_image_registry() is not None if self.spec.get_image_registry() is not None
else image else image
) )
volume_mounts = volume_mounts_for_service( volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name)
self.parsed_pod_yaml_map, service_name
)
# Handle command/entrypoint from compose file # Handle command/entrypoint from compose file
# In docker-compose: entrypoint -> k8s command, command -> k8s args # In docker-compose: entrypoint -> k8s command, command -> k8s args
container_command = None container_command = None
container_args = None container_args = None
if "entrypoint" in service_info: if "entrypoint" in service_info:
entrypoint = service_info["entrypoint"] entrypoint = service_info["entrypoint"]
container_command = ( container_command = entrypoint if isinstance(entrypoint, list) else [entrypoint]
entrypoint if isinstance(entrypoint, list) else [entrypoint]
)
if "command" in service_info: if "command" in service_info:
cmd = service_info["command"] cmd = service_info["command"]
container_args = cmd if isinstance(cmd, list) else cmd.split() container_args = cmd if isinstance(cmd, list) else cmd.split()
@ -528,18 +504,14 @@ class ClusterInfo:
volume_mounts=volume_mounts, volume_mounts=volume_mounts,
security_context=client.V1SecurityContext( security_context=client.V1SecurityContext(
privileged=self.spec.get_privileged(), privileged=self.spec.get_privileged(),
capabilities=client.V1Capabilities( capabilities=client.V1Capabilities(add=self.spec.get_capabilities())
add=self.spec.get_capabilities()
)
if self.spec.get_capabilities() if self.spec.get_capabilities()
else None, else None,
), ),
resources=to_k8s_resource_requirements(container_resources), resources=to_k8s_resource_requirements(container_resources),
) )
containers.append(container) containers.append(container)
volumes = volumes_for_pod_files( volumes = volumes_for_pod_files(self.parsed_pod_yaml_map, self.spec, self.app_name)
self.parsed_pod_yaml_map, self.spec, self.app_name
)
registry_config = self.spec.get_image_registry_config() registry_config = self.spec.get_image_registry_config()
if registry_config: if registry_config:
secret_name = f"{self.app_name}-registry" secret_name = f"{self.app_name}-registry"

View File

@ -14,42 +14,36 @@
import time import time
from datetime import datetime, timezone from datetime import datetime, timezone
from pathlib import Path from pathlib import Path
from typing import Any, cast
from kubernetes import client, config from kubernetes import client, config
from kubernetes.client.exceptions import ApiException from kubernetes.client.exceptions import ApiException
from typing import Any, Dict, List, Optional, cast
from stack_orchestrator import constants from stack_orchestrator import constants
from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator
from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo
from stack_orchestrator.deploy.k8s.helpers import ( from stack_orchestrator.deploy.k8s.helpers import (
containers_in_pod,
create_cluster, create_cluster,
destroy_cluster, destroy_cluster,
load_images_into_kind,
)
from stack_orchestrator.deploy.k8s.helpers import (
install_ingress_for_kind,
wait_for_ingress_in_kind,
is_ingress_running,
)
from stack_orchestrator.deploy.k8s.helpers import (
pods_in_deployment,
containers_in_pod,
log_stream_from_string,
)
from stack_orchestrator.deploy.k8s.helpers import (
generate_kind_config,
generate_high_memlock_spec_json, generate_high_memlock_spec_json,
generate_kind_config,
install_ingress_for_kind,
is_ingress_running,
load_images_into_kind,
log_stream_from_string,
pods_in_deployment,
wait_for_ingress_in_kind,
) )
from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo
from stack_orchestrator.opts import opts from stack_orchestrator.opts import opts
from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.util import error_exit from stack_orchestrator.util import error_exit
class AttrDict(dict): class AttrDict(dict):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self.__dict__ = self self.__dict__ = self
@ -144,9 +138,7 @@ class K8sDeployer(Deployer):
else: else:
# Get the config file and pass to load_kube_config() # Get the config file and pass to load_kube_config()
config.load_kube_config( config.load_kube_config(
config_file=self.deployment_dir.joinpath( config_file=self.deployment_dir.joinpath(constants.kube_config_filename).as_posix()
constants.kube_config_filename
).as_posix()
) )
self.core_api = client.CoreV1Api() self.core_api = client.CoreV1Api()
self.networking_api = client.NetworkingV1Api() self.networking_api = client.NetworkingV1Api()
@ -213,10 +205,7 @@ class K8sDeployer(Deployer):
) )
if opts.o.debug: if opts.o.debug:
print( print(f"Namespace {self.k8s_namespace} is terminating, " f"waiting for deletion...")
f"Namespace {self.k8s_namespace} is terminating, "
f"waiting for deletion..."
)
time.sleep(2) time.sleep(2)
def _delete_namespace(self): def _delete_namespace(self):
@ -276,9 +265,7 @@ class K8sDeployer(Deployer):
name=deployment.metadata.name, name=deployment.metadata.name,
namespace=self.k8s_namespace, namespace=self.k8s_namespace,
) )
deployment.metadata.resource_version = ( deployment.metadata.resource_version = existing.metadata.resource_version
existing.metadata.resource_version
)
resp = cast( resp = cast(
client.V1Deployment, client.V1Deployment,
self.apps_api.replace_namespaced_deployment( self.apps_api.replace_namespaced_deployment(
@ -391,9 +378,7 @@ class K8sDeployer(Deployer):
print(f"Sending this pv: {pv}") print(f"Sending this pv: {pv}")
if not opts.o.dry_run: if not opts.o.dry_run:
try: try:
pv_resp = self.core_api.read_persistent_volume( pv_resp = self.core_api.read_persistent_volume(name=pv.metadata.name)
name=pv.metadata.name
)
if pv_resp: if pv_resp:
if opts.o.debug: if opts.o.debug:
print("PVs already present:") print("PVs already present:")
@ -500,9 +485,9 @@ class K8sDeployer(Deployer):
if before < now < after: if before < now < after:
# Check the status is Ready # Check the status is Ready
for condition in status.get("conditions", []): for condition in status.get("conditions", []):
if "True" == condition.get( if "True" == condition.get("status") and "Ready" == condition.get(
"status" "type"
) and "Ready" == condition.get("type"): ):
return cert return cert
return None return None
@ -519,15 +504,11 @@ class K8sDeployer(Deployer):
self.skip_cluster_management = skip_cluster_management self.skip_cluster_management = skip_cluster_management
if not opts.o.dry_run: if not opts.o.dry_run:
if self.is_kind() and not self.skip_cluster_management: if self.is_kind() and not self.skip_cluster_management:
kind_config = str( kind_config = str(self.deployment_dir.joinpath(constants.kind_config_filename))
self.deployment_dir.joinpath(constants.kind_config_filename)
)
actual_cluster = create_cluster(self.kind_cluster_name, kind_config) actual_cluster = create_cluster(self.kind_cluster_name, kind_config)
if actual_cluster != self.kind_cluster_name: if actual_cluster != self.kind_cluster_name:
self.kind_cluster_name = actual_cluster self.kind_cluster_name = actual_cluster
local_containers = self.deployment_context.stack.obj.get( local_containers = self.deployment_context.stack.obj.get("containers", [])
"containers", []
)
if local_containers: if local_containers:
local_images = { local_images = {
img img
@ -579,9 +560,7 @@ class K8sDeployer(Deployer):
if opts.o.debug and certificate: if opts.o.debug and certificate:
print(f"Using existing certificate: {certificate}") print(f"Using existing certificate: {certificate}")
ingress = self.cluster_info.get_ingress( ingress = self.cluster_info.get_ingress(use_tls=use_tls, certificate=certificate)
use_tls=use_tls, certificate=certificate
)
if ingress: if ingress:
if opts.o.debug: if opts.o.debug:
print(f"Sending this ingress: {ingress}") print(f"Sending this ingress: {ingress}")
@ -590,7 +569,7 @@ class K8sDeployer(Deployer):
elif opts.o.debug: elif opts.o.debug:
print("No ingress configured") print("No ingress configured")
nodeports: List[client.V1Service] = self.cluster_info.get_nodeports() nodeports: list[client.V1Service] = self.cluster_info.get_nodeports()
for nodeport in nodeports: for nodeport in nodeports:
if opts.o.debug: if opts.o.debug:
print(f"Sending this nodeport: {nodeport}") print(f"Sending this nodeport: {nodeport}")
@ -670,7 +649,7 @@ class K8sDeployer(Deployer):
return return
cert = cast( cert = cast(
Dict[str, Any], dict[str, Any],
self.custom_obj_api.get_namespaced_custom_object( self.custom_obj_api.get_namespaced_custom_object(
group="cert-manager.io", group="cert-manager.io",
version="v1", version="v1",
@ -686,7 +665,7 @@ class K8sDeployer(Deployer):
if lb_ingress: if lb_ingress:
ip = lb_ingress[0].ip or "?" ip = lb_ingress[0].ip or "?"
cert_status = cert.get("status", {}) cert_status = cert.get("status", {})
tls = "notBefore: %s; notAfter: %s; names: %s" % ( tls = "notBefore: {}; notAfter: {}; names: {}".format(
cert_status.get("notBefore", "?"), cert_status.get("notBefore", "?"),
cert_status.get("notAfter", "?"), cert_status.get("notAfter", "?"),
ingress.spec.tls[0].hosts, ingress.spec.tls[0].hosts,
@ -727,9 +706,7 @@ class K8sDeployer(Deployer):
if c.ports: if c.ports:
for prt in c.ports: for prt in c.ports:
ports[str(prt.container_port)] = [ ports[str(prt.container_port)] = [
AttrDict( AttrDict({"HostIp": pod_ip, "HostPort": prt.container_port})
{"HostIp": pod_ip, "HostPort": prt.container_port}
)
] ]
ret.append( ret.append(
@ -791,9 +768,7 @@ class K8sDeployer(Deployer):
deployment = cast( deployment = cast(
client.V1Deployment, client.V1Deployment,
self.apps_api.read_namespaced_deployment( self.apps_api.read_namespaced_deployment(name=ref_name, namespace=self.k8s_namespace),
name=ref_name, namespace=self.k8s_namespace
),
) )
if not deployment.spec or not deployment.spec.template: if not deployment.spec or not deployment.spec.template:
return return
@ -832,14 +807,14 @@ class K8sDeployer(Deployer):
user=None, user=None,
volumes=None, volumes=None,
entrypoint=None, entrypoint=None,
env={}, env=None,
ports=[], ports=None,
detach=False, detach=False,
): ):
# We need to figure out how to do this -- check why we're being called first # We need to figure out how to do this -- check why we're being called first
pass pass
def run_job(self, job_name: str, helm_release: Optional[str] = None): def run_job(self, job_name: str, helm_release: str | None = None):
if not opts.o.dry_run: if not opts.o.dry_run:
from stack_orchestrator.deploy.k8s.helm.job_runner import run_helm_job from stack_orchestrator.deploy.k8s.helm.job_runner import run_helm_job
@ -881,13 +856,9 @@ class K8sDeployerConfigGenerator(DeployerConfigGenerator):
# Must be done before generate_kind_config() which references it. # Must be done before generate_kind_config() which references it.
if self.deployment_context.spec.get_unlimited_memlock(): if self.deployment_context.spec.get_unlimited_memlock():
spec_content = generate_high_memlock_spec_json() spec_content = generate_high_memlock_spec_json()
spec_file = deployment_dir.joinpath( spec_file = deployment_dir.joinpath(constants.high_memlock_spec_filename)
constants.high_memlock_spec_filename
)
if opts.o.debug: if opts.o.debug:
print( print(f"Creating high-memlock spec for unlimited memlock: {spec_file}")
f"Creating high-memlock spec for unlimited memlock: {spec_file}"
)
with open(spec_file, "w") as output_file: with open(spec_file, "w") as output_file:
output_file.write(spec_content) output_file.write(spec_content)

View File

@ -16,21 +16,21 @@
from pathlib import Path from pathlib import Path
from stack_orchestrator import constants from stack_orchestrator import constants
from stack_orchestrator.opts import opts
from stack_orchestrator.util import (
get_parsed_stack_config,
get_pod_list,
get_pod_file_path,
get_job_list,
get_job_file_path,
error_exit,
)
from stack_orchestrator.deploy.k8s.helm.kompose_wrapper import ( from stack_orchestrator.deploy.k8s.helm.kompose_wrapper import (
check_kompose_available, check_kompose_available,
get_kompose_version,
convert_to_helm_chart, convert_to_helm_chart,
get_kompose_version,
)
from stack_orchestrator.opts import opts
from stack_orchestrator.util import (
error_exit,
get_job_file_path,
get_job_list,
get_parsed_stack_config,
get_pod_file_path,
get_pod_list,
get_yaml,
) )
from stack_orchestrator.util import get_yaml
def _wrap_job_templates_with_conditionals(chart_dir: Path, jobs: list) -> None: def _wrap_job_templates_with_conditionals(chart_dir: Path, jobs: list) -> None:
@ -88,7 +88,7 @@ def _post_process_chart(chart_dir: Path, chart_name: str, jobs: list) -> None:
# Fix Chart.yaml # Fix Chart.yaml
chart_yaml_path = chart_dir / "Chart.yaml" chart_yaml_path = chart_dir / "Chart.yaml"
if chart_yaml_path.exists(): if chart_yaml_path.exists():
chart_yaml = yaml.load(open(chart_yaml_path, "r")) chart_yaml = yaml.load(open(chart_yaml_path))
# Fix name # Fix name
chart_yaml["name"] = chart_name chart_yaml["name"] = chart_name
@ -108,9 +108,7 @@ def _post_process_chart(chart_dir: Path, chart_name: str, jobs: list) -> None:
_wrap_job_templates_with_conditionals(chart_dir, jobs) _wrap_job_templates_with_conditionals(chart_dir, jobs)
def generate_helm_chart( def generate_helm_chart(stack_path: str, spec_file: str, deployment_dir_path: Path) -> None:
stack_path: str, spec_file: str, deployment_dir_path: Path
) -> None:
""" """
Generate a self-sufficient Helm chart from stack compose files using Kompose. Generate a self-sufficient Helm chart from stack compose files using Kompose.
@ -152,7 +150,7 @@ def generate_helm_chart(
error_exit(f"Deployment file not found: {deployment_file}") error_exit(f"Deployment file not found: {deployment_file}")
yaml = get_yaml() yaml = get_yaml()
deployment_config = yaml.load(open(deployment_file, "r")) deployment_config = yaml.load(open(deployment_file))
cluster_id = deployment_config.get(constants.cluster_id_key) cluster_id = deployment_config.get(constants.cluster_id_key)
if not cluster_id: if not cluster_id:
error_exit(f"cluster-id not found in {deployment_file}") error_exit(f"cluster-id not found in {deployment_file}")
@ -219,10 +217,7 @@ def generate_helm_chart(
# 5. Create chart directory and invoke Kompose # 5. Create chart directory and invoke Kompose
chart_dir = deployment_dir_path / "chart" chart_dir = deployment_dir_path / "chart"
print( print(f"Converting {len(compose_files)} compose file(s) to Helm chart " "using Kompose...")
f"Converting {len(compose_files)} compose file(s) to Helm chart "
"using Kompose..."
)
try: try:
output = convert_to_helm_chart( output = convert_to_helm_chart(
@ -304,9 +299,7 @@ Edit the generated template files in `templates/` to customize:
# Count generated files # Count generated files
template_files = ( template_files = (
list((chart_dir / "templates").glob("*.yaml")) list((chart_dir / "templates").glob("*.yaml")) if (chart_dir / "templates").exists() else []
if (chart_dir / "templates").exists()
else []
) )
print(f" Files: {len(template_files)} template(s) generated") print(f" Files: {len(template_files)} template(s) generated")

View File

@ -13,12 +13,12 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import json
import os
import subprocess import subprocess
import tempfile import tempfile
import os
import json
from pathlib import Path from pathlib import Path
from typing import Optional
from stack_orchestrator.util import get_yaml from stack_orchestrator.util import get_yaml
@ -40,18 +40,19 @@ def get_release_name_from_chart(chart_dir: Path) -> str:
raise Exception(f"Chart.yaml not found: {chart_yaml_path}") raise Exception(f"Chart.yaml not found: {chart_yaml_path}")
yaml = get_yaml() yaml = get_yaml()
chart_yaml = yaml.load(open(chart_yaml_path, "r")) chart_yaml = yaml.load(open(chart_yaml_path))
if "name" not in chart_yaml: if "name" not in chart_yaml:
raise Exception(f"Chart name not found in {chart_yaml_path}") raise Exception(f"Chart name not found in {chart_yaml_path}")
return chart_yaml["name"] name: str = chart_yaml["name"]
return name
def run_helm_job( def run_helm_job(
chart_dir: Path, chart_dir: Path,
job_name: str, job_name: str,
release: Optional[str] = None, release: str | None = None,
namespace: str = "default", namespace: str = "default",
timeout: int = 600, timeout: int = 600,
verbose: bool = False, verbose: bool = False,
@ -94,9 +95,7 @@ def run_helm_job(
print(f"Running job '{job_name}' from helm chart: {chart_dir}") print(f"Running job '{job_name}' from helm chart: {chart_dir}")
# Use helm template to render the job manifest # Use helm template to render the job manifest
with tempfile.NamedTemporaryFile( with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as tmp_file:
mode="w", suffix=".yaml", delete=False
) as tmp_file:
try: try:
# Render job template with job enabled # Render job template with job enabled
# Use --set-json to properly handle job names with dashes # Use --set-json to properly handle job names with dashes
@ -116,9 +115,7 @@ def run_helm_job(
if verbose: if verbose:
print(f"Running: {' '.join(helm_cmd)}") print(f"Running: {' '.join(helm_cmd)}")
result = subprocess.run( result = subprocess.run(helm_cmd, check=True, capture_output=True, text=True)
helm_cmd, check=True, capture_output=True, text=True
)
tmp_file.write(result.stdout) tmp_file.write(result.stdout)
tmp_file.flush() tmp_file.flush()
@ -139,9 +136,7 @@ def run_helm_job(
"-n", "-n",
namespace, namespace,
] ]
subprocess.run( subprocess.run(kubectl_apply_cmd, check=True, capture_output=True, text=True)
kubectl_apply_cmd, check=True, capture_output=True, text=True
)
if verbose: if verbose:
print(f"Job {actual_job_name} created, waiting for completion...") print(f"Job {actual_job_name} created, waiting for completion...")
@ -164,7 +159,7 @@ def run_helm_job(
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
error_msg = e.stderr if e.stderr else str(e) error_msg = e.stderr if e.stderr else str(e)
raise Exception(f"Job failed: {error_msg}") raise Exception(f"Job failed: {error_msg}") from e
finally: finally:
# Clean up temp file # Clean up temp file
if os.path.exists(tmp_file.name): if os.path.exists(tmp_file.name):

View File

@ -13,10 +13,9 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import subprocess
import shutil import shutil
import subprocess
from pathlib import Path from pathlib import Path
from typing import List, Optional
def check_kompose_available() -> bool: def check_kompose_available() -> bool:
@ -37,9 +36,7 @@ def get_kompose_version() -> str:
if not check_kompose_available(): if not check_kompose_available():
raise Exception("kompose not found in PATH") raise Exception("kompose not found in PATH")
result = subprocess.run( result = subprocess.run(["kompose", "version"], capture_output=True, text=True, timeout=10)
["kompose", "version"], capture_output=True, text=True, timeout=10
)
if result.returncode != 0: if result.returncode != 0:
raise Exception(f"Failed to get kompose version: {result.stderr}") raise Exception(f"Failed to get kompose version: {result.stderr}")
@ -53,7 +50,7 @@ def get_kompose_version() -> str:
def convert_to_helm_chart( def convert_to_helm_chart(
compose_files: List[Path], output_dir: Path, chart_name: Optional[str] = None compose_files: list[Path], output_dir: Path, chart_name: str | None = None
) -> str: ) -> str:
""" """
Invoke kompose to convert Docker Compose files to a Helm chart. Invoke kompose to convert Docker Compose files to a Helm chart.
@ -71,8 +68,7 @@ def convert_to_helm_chart(
""" """
if not check_kompose_available(): if not check_kompose_available():
raise Exception( raise Exception(
"kompose not found in PATH. " "kompose not found in PATH. " "Install from: https://kompose.io/installation/"
"Install from: https://kompose.io/installation/"
) )
# Ensure output directory exists # Ensure output directory exists
@ -95,9 +91,7 @@ def convert_to_helm_chart(
if result.returncode != 0: if result.returncode != 0:
raise Exception( raise Exception(
f"Kompose conversion failed:\n" f"Kompose conversion failed:\n" f"Command: {' '.join(cmd)}\n" f"Error: {result.stderr}"
f"Command: {' '.join(cmd)}\n"
f"Error: {result.stderr}"
) )
return result.stdout return result.stdout

View File

@ -13,20 +13,22 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import os
import re
import subprocess
from collections.abc import Mapping
from pathlib import Path
from typing import cast
import yaml
from kubernetes import client, utils, watch from kubernetes import client, utils, watch
from kubernetes.client.exceptions import ApiException from kubernetes.client.exceptions import ApiException
import os
from pathlib import Path
import subprocess
import re
from typing import Set, Mapping, List, Optional, cast
import yaml
from stack_orchestrator.util import get_k8s_dir, error_exit from stack_orchestrator import constants
from stack_orchestrator.opts import opts
from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names
from stack_orchestrator.deploy.deployer import DeployerException from stack_orchestrator.deploy.deployer import DeployerException
from stack_orchestrator import constants from stack_orchestrator.opts import opts
from stack_orchestrator.util import error_exit, get_k8s_dir
def is_host_path_mount(volume_name: str) -> bool: def is_host_path_mount(volume_name: str) -> bool:
@ -77,9 +79,7 @@ def get_kind_cluster():
Uses `kind get clusters` to find existing clusters. Uses `kind get clusters` to find existing clusters.
Returns the cluster name or None if no cluster exists. Returns the cluster name or None if no cluster exists.
""" """
result = subprocess.run( result = subprocess.run("kind get clusters", shell=True, capture_output=True, text=True)
"kind get clusters", shell=True, capture_output=True, text=True
)
if result.returncode != 0: if result.returncode != 0:
return None return None
@ -98,12 +98,12 @@ def _run_command(command: str):
return result return result
def _get_etcd_host_path_from_kind_config(config_file: str) -> Optional[str]: def _get_etcd_host_path_from_kind_config(config_file: str) -> str | None:
"""Extract etcd host path from kind config extraMounts.""" """Extract etcd host path from kind config extraMounts."""
import yaml import yaml
try: try:
with open(config_file, "r") as f: with open(config_file) as f:
config = yaml.safe_load(f) config = yaml.safe_load(f)
except Exception: except Exception:
return None return None
@ -113,7 +113,8 @@ def _get_etcd_host_path_from_kind_config(config_file: str) -> Optional[str]:
extra_mounts = node.get("extraMounts", []) extra_mounts = node.get("extraMounts", [])
for mount in extra_mounts: for mount in extra_mounts:
if mount.get("containerPath") == "/var/lib/etcd": if mount.get("containerPath") == "/var/lib/etcd":
return mount.get("hostPath") host_path: str | None = mount.get("hostPath")
return host_path
return None return None
@ -133,8 +134,7 @@ def _clean_etcd_keeping_certs(etcd_path: str) -> bool:
db_path = Path(etcd_path) / "member" / "snap" / "db" db_path = Path(etcd_path) / "member" / "snap" / "db"
# Check existence using docker since etcd dir is root-owned # Check existence using docker since etcd dir is root-owned
check_cmd = ( check_cmd = (
f"docker run --rm -v {etcd_path}:/etcd:ro alpine:3.19 " f"docker run --rm -v {etcd_path}:/etcd:ro alpine:3.19 " "test -f /etcd/member/snap/db"
"test -f /etcd/member/snap/db"
) )
check_result = subprocess.run(check_cmd, shell=True, capture_output=True) check_result = subprocess.run(check_cmd, shell=True, capture_output=True)
if check_result.returncode != 0: if check_result.returncode != 0:
@ -337,7 +337,7 @@ def is_ingress_running() -> bool:
def wait_for_ingress_in_kind(): def wait_for_ingress_in_kind():
core_v1 = client.CoreV1Api() core_v1 = client.CoreV1Api()
for i in range(20): for _i in range(20):
warned_waiting = False warned_waiting = False
w = watch.Watch() w = watch.Watch()
for event in w.stream( for event in w.stream(
@ -364,9 +364,7 @@ def wait_for_ingress_in_kind():
def install_ingress_for_kind(acme_email: str = ""): def install_ingress_for_kind(acme_email: str = ""):
api_client = client.ApiClient() api_client = client.ApiClient()
ingress_install = os.path.abspath( ingress_install = os.path.abspath(
get_k8s_dir().joinpath( get_k8s_dir().joinpath("components", "ingress", "ingress-caddy-kind-deploy.yaml")
"components", "ingress", "ingress-caddy-kind-deploy.yaml"
)
) )
if opts.o.debug: if opts.o.debug:
print("Installing Caddy ingress controller in kind cluster") print("Installing Caddy ingress controller in kind cluster")
@ -400,11 +398,9 @@ def install_ingress_for_kind(acme_email: str = ""):
) )
def load_images_into_kind(kind_cluster_name: str, image_set: Set[str]): def load_images_into_kind(kind_cluster_name: str, image_set: set[str]):
for image in image_set: for image in image_set:
result = _run_command( result = _run_command(f"kind load docker-image {image} --name {kind_cluster_name}")
f"kind load docker-image {image} --name {kind_cluster_name}"
)
if result.returncode != 0: if result.returncode != 0:
raise DeployerException(f"kind load docker-image failed: {result}") raise DeployerException(f"kind load docker-image failed: {result}")
@ -422,11 +418,9 @@ def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str):
return pods return pods
def containers_in_pod(core_api: client.CoreV1Api, pod_name: str) -> List[str]: def containers_in_pod(core_api: client.CoreV1Api, pod_name: str) -> list[str]:
containers: List[str] = [] containers: list[str] = []
pod_response = cast( pod_response = cast(client.V1Pod, core_api.read_namespaced_pod(pod_name, namespace="default"))
client.V1Pod, core_api.read_namespaced_pod(pod_name, namespace="default")
)
if opts.o.debug: if opts.o.debug:
print(f"pod_response: {pod_response}") print(f"pod_response: {pod_response}")
if not pod_response.spec or not pod_response.spec.containers: if not pod_response.spec or not pod_response.spec.containers:
@ -449,7 +443,7 @@ def named_volumes_from_pod_files(parsed_pod_files):
parsed_pod_file = parsed_pod_files[pod] parsed_pod_file = parsed_pod_files[pod]
if "volumes" in parsed_pod_file: if "volumes" in parsed_pod_file:
volumes = parsed_pod_file["volumes"] volumes = parsed_pod_file["volumes"]
for volume, value in volumes.items(): for volume, _value in volumes.items():
# Volume definition looks like: # Volume definition looks like:
# 'laconicd-data': None # 'laconicd-data': None
named_volumes.append(volume) named_volumes.append(volume)
@ -481,14 +475,10 @@ def volume_mounts_for_service(parsed_pod_files, service):
mount_split = mount_string.split(":") mount_split = mount_string.split(":")
volume_name = mount_split[0] volume_name = mount_split[0]
mount_path = mount_split[1] mount_path = mount_split[1]
mount_options = ( mount_options = mount_split[2] if len(mount_split) == 3 else None
mount_split[2] if len(mount_split) == 3 else None
)
# For host path mounts, use sanitized name # For host path mounts, use sanitized name
if is_host_path_mount(volume_name): if is_host_path_mount(volume_name):
k8s_volume_name = sanitize_host_path_to_volume_name( k8s_volume_name = sanitize_host_path_to_volume_name(volume_name)
volume_name
)
else: else:
k8s_volume_name = volume_name k8s_volume_name = volume_name
if opts.o.debug: if opts.o.debug:
@ -527,9 +517,7 @@ def volumes_for_pod_files(parsed_pod_files, spec, app_name):
claim = client.V1PersistentVolumeClaimVolumeSource( claim = client.V1PersistentVolumeClaimVolumeSource(
claim_name=f"{app_name}-{volume_name}" claim_name=f"{app_name}-{volume_name}"
) )
volume = client.V1Volume( volume = client.V1Volume(name=volume_name, persistent_volume_claim=claim)
name=volume_name, persistent_volume_claim=claim
)
result.append(volume) result.append(volume)
# Handle host path mounts from service volumes # Handle host path mounts from service volumes
@ -542,15 +530,11 @@ def volumes_for_pod_files(parsed_pod_files, spec, app_name):
mount_split = mount_string.split(":") mount_split = mount_string.split(":")
volume_source = mount_split[0] volume_source = mount_split[0]
if is_host_path_mount(volume_source): if is_host_path_mount(volume_source):
sanitized_name = sanitize_host_path_to_volume_name( sanitized_name = sanitize_host_path_to_volume_name(volume_source)
volume_source
)
if sanitized_name not in seen_host_path_volumes: if sanitized_name not in seen_host_path_volumes:
seen_host_path_volumes.add(sanitized_name) seen_host_path_volumes.add(sanitized_name)
# Create hostPath volume for mount inside kind node # Create hostPath volume for mount inside kind node
kind_mount_path = get_kind_host_path_mount_path( kind_mount_path = get_kind_host_path_mount_path(sanitized_name)
sanitized_name
)
host_path_source = client.V1HostPathVolumeSource( host_path_source = client.V1HostPathVolumeSource(
path=kind_mount_path, type="FileOrCreate" path=kind_mount_path, type="FileOrCreate"
) )
@ -585,18 +569,14 @@ def _generate_kind_mounts(parsed_pod_files, deployment_dir, deployment_context):
deployment_id = deployment_context.id deployment_id = deployment_context.id
backup_subdir = f"cluster-backups/{deployment_id}" backup_subdir = f"cluster-backups/{deployment_id}"
etcd_host_path = _make_absolute_host_path( etcd_host_path = _make_absolute_host_path(Path(f"./data/{backup_subdir}/etcd"), deployment_dir)
Path(f"./data/{backup_subdir}/etcd"), deployment_dir
)
volume_definitions.append( volume_definitions.append(
f" - hostPath: {etcd_host_path}\n" f" - hostPath: {etcd_host_path}\n"
f" containerPath: /var/lib/etcd\n" f" containerPath: /var/lib/etcd\n"
f" propagation: HostToContainer\n" f" propagation: HostToContainer\n"
) )
pki_host_path = _make_absolute_host_path( pki_host_path = _make_absolute_host_path(Path(f"./data/{backup_subdir}/pki"), deployment_dir)
Path(f"./data/{backup_subdir}/pki"), deployment_dir
)
volume_definitions.append( volume_definitions.append(
f" - hostPath: {pki_host_path}\n" f" - hostPath: {pki_host_path}\n"
f" containerPath: /etc/kubernetes/pki\n" f" containerPath: /etc/kubernetes/pki\n"
@ -626,18 +606,12 @@ def _generate_kind_mounts(parsed_pod_files, deployment_dir, deployment_context):
if is_host_path_mount(volume_name): if is_host_path_mount(volume_name):
# Host path mount - add extraMount for kind # Host path mount - add extraMount for kind
sanitized_name = sanitize_host_path_to_volume_name( sanitized_name = sanitize_host_path_to_volume_name(volume_name)
volume_name
)
if sanitized_name not in seen_host_path_mounts: if sanitized_name not in seen_host_path_mounts:
seen_host_path_mounts.add(sanitized_name) seen_host_path_mounts.add(sanitized_name)
# Resolve path relative to compose directory # Resolve path relative to compose directory
host_path = resolve_host_path_for_kind( host_path = resolve_host_path_for_kind(volume_name, deployment_dir)
volume_name, deployment_dir container_path = get_kind_host_path_mount_path(sanitized_name)
)
container_path = get_kind_host_path_mount_path(
sanitized_name
)
volume_definitions.append( volume_definitions.append(
f" - hostPath: {host_path}\n" f" - hostPath: {host_path}\n"
f" containerPath: {container_path}\n" f" containerPath: {container_path}\n"
@ -651,10 +625,7 @@ def _generate_kind_mounts(parsed_pod_files, deployment_dir, deployment_context):
print(f"volume_name: {volume_name}") print(f"volume_name: {volume_name}")
print(f"map: {volume_host_path_map}") print(f"map: {volume_host_path_map}")
print(f"mount path: {mount_path}") print(f"mount path: {mount_path}")
if ( if volume_name not in deployment_context.spec.get_configmaps():
volume_name
not in deployment_context.spec.get_configmaps()
):
if ( if (
volume_name in volume_host_path_map volume_name in volume_host_path_map
and volume_host_path_map[volume_name] and volume_host_path_map[volume_name]
@ -663,9 +634,7 @@ def _generate_kind_mounts(parsed_pod_files, deployment_dir, deployment_context):
volume_host_path_map[volume_name], volume_host_path_map[volume_name],
deployment_dir, deployment_dir,
) )
container_path = get_kind_pv_bind_mount_path( container_path = get_kind_pv_bind_mount_path(volume_name)
volume_name
)
volume_definitions.append( volume_definitions.append(
f" - hostPath: {host_path}\n" f" - hostPath: {host_path}\n"
f" containerPath: {container_path}\n" f" containerPath: {container_path}\n"
@ -693,8 +662,7 @@ def _generate_kind_port_mappings_from_services(parsed_pod_files):
# TODO handle the complex cases # TODO handle the complex cases
# Looks like: 80 or something more complicated # Looks like: 80 or something more complicated
port_definitions.append( port_definitions.append(
f" - containerPort: {port_string}\n" f" - containerPort: {port_string}\n" f" hostPort: {port_string}\n"
f" hostPort: {port_string}\n"
) )
return ( return (
"" ""
@ -707,9 +675,7 @@ def _generate_kind_port_mappings(parsed_pod_files):
port_definitions = [] port_definitions = []
# Map port 80 and 443 for the Caddy ingress controller (HTTPS support) # Map port 80 and 443 for the Caddy ingress controller (HTTPS support)
for port_string in ["80", "443"]: for port_string in ["80", "443"]:
port_definitions.append( port_definitions.append(f" - containerPort: {port_string}\n hostPort: {port_string}\n")
f" - containerPort: {port_string}\n hostPort: {port_string}\n"
)
return ( return (
"" ""
if len(port_definitions) == 0 if len(port_definitions) == 0
@ -903,9 +869,7 @@ def generate_cri_base_json():
return generate_high_memlock_spec_json() return generate_high_memlock_spec_json()
def _generate_containerd_config_patches( def _generate_containerd_config_patches(deployment_dir: Path, has_high_memlock: bool) -> str:
deployment_dir: Path, has_high_memlock: bool
) -> str:
"""Generate containerdConfigPatches YAML for custom runtime handlers. """Generate containerdConfigPatches YAML for custom runtime handlers.
This configures containerd to have a runtime handler named 'high-memlock' This configures containerd to have a runtime handler named 'high-memlock'
@ -932,9 +896,7 @@ def merge_envs(a: Mapping[str, str], b: Mapping[str, str]) -> Mapping[str, str]:
return result return result
def _expand_shell_vars( def _expand_shell_vars(raw_val: str, env_map: Mapping[str, str] | None = None) -> str:
raw_val: str, env_map: Optional[Mapping[str, str]] = None
) -> str:
# Expand docker-compose style variable substitution: # Expand docker-compose style variable substitution:
# ${VAR} - use VAR value or empty string # ${VAR} - use VAR value or empty string
# ${VAR:-default} - use VAR value or default if unset/empty # ${VAR:-default} - use VAR value or default if unset/empty
@ -959,7 +921,7 @@ def _expand_shell_vars(
def envs_from_compose_file( def envs_from_compose_file(
compose_file_envs: Mapping[str, str], env_map: Optional[Mapping[str, str]] = None compose_file_envs: Mapping[str, str], env_map: Mapping[str, str] | None = None
) -> Mapping[str, str]: ) -> Mapping[str, str]:
result = {} result = {}
for env_var, env_val in compose_file_envs.items(): for env_var, env_val in compose_file_envs.items():
@ -969,7 +931,7 @@ def envs_from_compose_file(
def translate_sidecar_service_names( def translate_sidecar_service_names(
envs: Mapping[str, str], sibling_service_names: List[str] envs: Mapping[str, str], sibling_service_names: list[str]
) -> Mapping[str, str]: ) -> Mapping[str, str]:
"""Translate docker-compose service names to localhost for sidecar containers. """Translate docker-compose service names to localhost for sidecar containers.
@ -996,7 +958,12 @@ def translate_sidecar_service_names(
# Handle URLs like: postgres://user:pass@db:5432/dbname # Handle URLs like: postgres://user:pass@db:5432/dbname
# and simple refs like: db:5432 or just db # and simple refs like: db:5432 or just db
pattern = rf"\b{re.escape(service_name)}(:\d+)?\b" pattern = rf"\b{re.escape(service_name)}(:\d+)?\b"
new_val = re.sub(pattern, lambda m: f'localhost{m.group(1) or ""}', new_val)
def _replace_with_localhost(m: re.Match[str]) -> str:
port: str = m.group(1) or ""
return "localhost" + port
new_val = re.sub(pattern, _replace_with_localhost, new_val)
result[env_var] = new_val result[env_var] = new_val
@ -1004,8 +971,8 @@ def translate_sidecar_service_names(
def envs_from_environment_variables_map( def envs_from_environment_variables_map(
map: Mapping[str, str] map: Mapping[str, str],
) -> List[client.V1EnvVar]: ) -> list[client.V1EnvVar]:
result = [] result = []
for env_var, env_val in map.items(): for env_var, env_val in map.items():
result.append(client.V1EnvVar(env_var, env_val)) result.append(client.V1EnvVar(env_var, env_val))
@ -1036,17 +1003,13 @@ def generate_kind_config(deployment_dir: Path, deployment_context):
pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()] pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()]
parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files) parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files)
port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map) port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map)
mounts_yml = _generate_kind_mounts( mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir, deployment_context)
parsed_pod_files_map, deployment_dir, deployment_context
)
# Check if unlimited_memlock is enabled # Check if unlimited_memlock is enabled
unlimited_memlock = deployment_context.spec.get_unlimited_memlock() unlimited_memlock = deployment_context.spec.get_unlimited_memlock()
# Generate containerdConfigPatches for RuntimeClass support # Generate containerdConfigPatches for RuntimeClass support
containerd_patches_yml = _generate_containerd_config_patches( containerd_patches_yml = _generate_containerd_config_patches(deployment_dir, unlimited_memlock)
deployment_dir, unlimited_memlock
)
# Add high-memlock spec file mount if needed # Add high-memlock spec file mount if needed
if unlimited_memlock: if unlimited_memlock:

View File

@ -14,19 +14,18 @@
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import typing import typing
from typing import Optional
import humanfriendly
from pathlib import Path from pathlib import Path
from stack_orchestrator.util import get_yaml import humanfriendly
from stack_orchestrator import constants from stack_orchestrator import constants
from stack_orchestrator.util import get_yaml
class ResourceLimits: class ResourceLimits:
cpus: Optional[float] = None cpus: float | None = None
memory: Optional[int] = None memory: int | None = None
storage: Optional[int] = None storage: int | None = None
def __init__(self, obj=None): def __init__(self, obj=None):
if obj is None: if obj is None:
@ -50,8 +49,8 @@ class ResourceLimits:
class Resources: class Resources:
limits: Optional[ResourceLimits] = None limits: ResourceLimits | None = None
reservations: Optional[ResourceLimits] = None reservations: ResourceLimits | None = None
def __init__(self, obj=None): def __init__(self, obj=None):
if obj is None: if obj is None:
@ -74,9 +73,9 @@ class Resources:
class Spec: class Spec:
obj: typing.Any obj: typing.Any
file_path: Optional[Path] file_path: Path | None
def __init__(self, file_path: Optional[Path] = None, obj=None) -> None: def __init__(self, file_path: Path | None = None, obj=None) -> None:
if obj is None: if obj is None:
obj = {} obj = {}
self.file_path = file_path self.file_path = file_path
@ -92,13 +91,13 @@ class Spec:
return self.obj.get(item, default) return self.obj.get(item, default)
def init_from_file(self, file_path: Path): def init_from_file(self, file_path: Path):
self.obj = get_yaml().load(open(file_path, "r")) self.obj = get_yaml().load(open(file_path))
self.file_path = file_path self.file_path = file_path
def get_image_registry(self): def get_image_registry(self):
return self.obj.get(constants.image_registry_key) return self.obj.get(constants.image_registry_key)
def get_image_registry_config(self) -> typing.Optional[typing.Dict]: def get_image_registry_config(self) -> dict | None:
"""Returns registry auth config: {server, username, token-env}. """Returns registry auth config: {server, username, token-env}.
Used for private container registries like GHCR. The token-env field Used for private container registries like GHCR. The token-env field
@ -107,7 +106,8 @@ class Spec:
Note: Uses 'registry-credentials' key to avoid collision with Note: Uses 'registry-credentials' key to avoid collision with
'image-registry' key which is for pushing images. 'image-registry' key which is for pushing images.
""" """
return self.obj.get("registry-credentials") result: dict[str, str] | None = self.obj.get("registry-credentials")
return result
def get_volumes(self): def get_volumes(self):
return self.obj.get(constants.volumes_key, {}) return self.obj.get(constants.volumes_key, {})
@ -116,35 +116,25 @@ class Spec:
return self.obj.get(constants.configmaps_key, {}) return self.obj.get(constants.configmaps_key, {})
def get_container_resources(self): def get_container_resources(self):
return Resources( return Resources(self.obj.get(constants.resources_key, {}).get("containers", {}))
self.obj.get(constants.resources_key, {}).get("containers", {})
)
def get_container_resources_for( def get_container_resources_for(self, container_name: str) -> Resources | None:
self, container_name: str
) -> typing.Optional[Resources]:
"""Look up per-container resource overrides from spec.yml. """Look up per-container resource overrides from spec.yml.
Checks resources.containers.<container_name> in the spec. Returns None Checks resources.containers.<container_name> in the spec. Returns None
if no per-container override exists (caller falls back to other sources). if no per-container override exists (caller falls back to other sources).
""" """
containers_block = self.obj.get(constants.resources_key, {}).get( containers_block = self.obj.get(constants.resources_key, {}).get("containers", {})
"containers", {}
)
if container_name in containers_block: if container_name in containers_block:
entry = containers_block[container_name] entry = containers_block[container_name]
# Only treat it as a per-container override if it's a dict with # Only treat it as a per-container override if it's a dict with
# reservations/limits nested inside (not a top-level global key) # reservations/limits nested inside (not a top-level global key)
if isinstance(entry, dict) and ( if isinstance(entry, dict) and ("reservations" in entry or "limits" in entry):
"reservations" in entry or "limits" in entry
):
return Resources(entry) return Resources(entry)
return None return None
def get_volume_resources(self): def get_volume_resources(self):
return Resources( return Resources(self.obj.get(constants.resources_key, {}).get(constants.volumes_key, {}))
self.obj.get(constants.resources_key, {}).get(constants.volumes_key, {})
)
def get_http_proxy(self): def get_http_proxy(self):
return self.obj.get(constants.network_key, {}).get(constants.http_proxy_key, []) return self.obj.get(constants.network_key, {}).get(constants.http_proxy_key, [])
@ -167,9 +157,7 @@ class Spec:
def get_privileged(self): def get_privileged(self):
return ( return (
"true" "true"
== str( == str(self.obj.get(constants.security_key, {}).get("privileged", "false")).lower()
self.obj.get(constants.security_key, {}).get("privileged", "false")
).lower()
) )
def get_capabilities(self): def get_capabilities(self):
@ -196,9 +184,7 @@ class Spec:
Runtime class name string, or None to use default runtime. Runtime class name string, or None to use default runtime.
""" """
# Explicit runtime class takes precedence # Explicit runtime class takes precedence
explicit = self.obj.get(constants.security_key, {}).get( explicit = self.obj.get(constants.security_key, {}).get(constants.runtime_class_key, None)
constants.runtime_class_key, None
)
if explicit: if explicit:
return explicit return explicit

View File

@ -13,8 +13,9 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
from pathlib import Path
import typing import typing
from pathlib import Path
from stack_orchestrator.util import get_yaml from stack_orchestrator.util import get_yaml
@ -26,4 +27,4 @@ class Stack:
self.name = name self.name = name
def init_from_file(self, file_path: Path): def init_from_file(self, file_path: Path):
self.obj = get_yaml().load(open(file_path, "r")) self.obj = get_yaml().load(open(file_path))

View File

@ -13,23 +13,22 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import click
import os import os
from pathlib import Path from pathlib import Path
from urllib.parse import urlparse
from tempfile import NamedTemporaryFile from tempfile import NamedTemporaryFile
from urllib.parse import urlparse
import click
from stack_orchestrator.util import error_exit, global_options2
from stack_orchestrator.deploy.deployment_create import init_operation, create_operation
from stack_orchestrator.deploy.deploy import create_deploy_context from stack_orchestrator.deploy.deploy import create_deploy_context
from stack_orchestrator.deploy.deploy_types import DeployCommandContext from stack_orchestrator.deploy.deploy_types import DeployCommandContext
from stack_orchestrator.deploy.deployment_create import create_operation, init_operation
from stack_orchestrator.util import error_exit, global_options2
def _fixup_container_tag(deployment_dir: str, image: str): def _fixup_container_tag(deployment_dir: str, image: str):
deployment_dir_path = Path(deployment_dir) deployment_dir_path = Path(deployment_dir)
compose_file = deployment_dir_path.joinpath( compose_file = deployment_dir_path.joinpath("compose", "docker-compose-webapp-template.yml")
"compose", "docker-compose-webapp-template.yml"
)
# replace "cerc/webapp-container:local" in the file with our image tag # replace "cerc/webapp-container:local" in the file with our image tag
with open(compose_file) as rfile: with open(compose_file) as rfile:
contents = rfile.read() contents = rfile.read()
@ -56,9 +55,7 @@ def _fixup_url_spec(spec_file_name: str, url: str):
wfile.write(contents) wfile.write(contents)
def create_deployment( def create_deployment(ctx, deployment_dir, image, url, kube_config, image_registry, env_file):
ctx, deployment_dir, image, url, kube_config, image_registry, env_file
):
# Do the equivalent of: # Do the equivalent of:
# 1. laconic-so --stack webapp-template deploy --deploy-to k8s init \ # 1. laconic-so --stack webapp-template deploy --deploy-to k8s init \
# --output webapp-spec.yml # --output webapp-spec.yml
@ -117,9 +114,7 @@ def command(ctx):
"--image-registry", "--image-registry",
help="Provide a container image registry url for this k8s cluster", help="Provide a container image registry url for this k8s cluster",
) )
@click.option( @click.option("--deployment-dir", help="Create deployment files in this directory", required=True)
"--deployment-dir", help="Create deployment files in this directory", required=True
)
@click.option("--image", help="image to deploy", required=True) @click.option("--image", help="image to deploy", required=True)
@click.option("--url", help="url to serve", required=True) @click.option("--url", help="url to serve", required=True)
@click.option("--env-file", help="environment file for webapp") @click.option("--env-file", help="environment file for webapp")
@ -127,6 +122,4 @@ def command(ctx):
def create(ctx, deployment_dir, image, url, kube_config, image_registry, env_file): def create(ctx, deployment_dir, image, url, kube_config, image_registry, env_file):
"""create a deployment for the specified webapp container""" """create a deployment for the specified webapp container"""
return create_deployment( return create_deployment(ctx, deployment_dir, image, url, kube_config, image_registry, env_file)
ctx, deployment_dir, image, url, kube_config, image_registry, env_file
)

View File

@ -21,10 +21,10 @@ import sys
import tempfile import tempfile
import time import time
import uuid import uuid
import yaml
import click import click
import gnupg import gnupg
import yaml
from stack_orchestrator.deploy.images import remote_image_exists from stack_orchestrator.deploy.images import remote_image_exists
from stack_orchestrator.deploy.webapp import deploy_webapp from stack_orchestrator.deploy.webapp import deploy_webapp
@ -34,16 +34,16 @@ from stack_orchestrator.deploy.webapp.util import (
TimedLogger, TimedLogger,
build_container_image, build_container_image,
confirm_auction, confirm_auction,
push_container_image,
file_hash,
deploy_to_k8s,
publish_deployment,
hostname_for_deployment_request,
generate_hostname_for_app,
match_owner,
skip_by_tag,
confirm_payment, confirm_payment,
deploy_to_k8s,
file_hash,
generate_hostname_for_app,
hostname_for_deployment_request,
load_known_requests, load_known_requests,
match_owner,
publish_deployment,
push_container_image,
skip_by_tag,
) )
@ -70,9 +70,7 @@ def process_app_deployment_request(
logger.log("BEGIN - process_app_deployment_request") logger.log("BEGIN - process_app_deployment_request")
# 1. look up application # 1. look up application
app = laconic.get_record( app = laconic.get_record(app_deployment_request.attributes.application, require=True)
app_deployment_request.attributes.application, require=True
)
assert app is not None # require=True ensures this assert app is not None # require=True ensures this
logger.log(f"Retrieved app record {app_deployment_request.attributes.application}") logger.log(f"Retrieved app record {app_deployment_request.attributes.application}")
@ -84,9 +82,7 @@ def process_app_deployment_request(
if "allow" == fqdn_policy or "preexisting" == fqdn_policy: if "allow" == fqdn_policy or "preexisting" == fqdn_policy:
fqdn = requested_name fqdn = requested_name
else: else:
raise Exception( raise Exception(f"{requested_name} is invalid: only unqualified hostnames are allowed.")
f"{requested_name} is invalid: only unqualified hostnames are allowed."
)
else: else:
fqdn = f"{requested_name}.{default_dns_suffix}" fqdn = f"{requested_name}.{default_dns_suffix}"
@ -108,8 +104,7 @@ def process_app_deployment_request(
logger.log(f"Matched DnsRecord ownership: {matched_owner}") logger.log(f"Matched DnsRecord ownership: {matched_owner}")
else: else:
raise Exception( raise Exception(
"Unable to confirm ownership of DnsRecord %s for request %s" f"Unable to confirm ownership of DnsRecord {dns_lrn} for request {app_deployment_request.id}"
% (dns_lrn, app_deployment_request.id)
) )
elif "preexisting" == fqdn_policy: elif "preexisting" == fqdn_policy:
raise Exception( raise Exception(
@ -144,7 +139,7 @@ def process_app_deployment_request(
env_filename = tempfile.mktemp() env_filename = tempfile.mktemp()
with open(env_filename, "w") as file: with open(env_filename, "w") as file:
for k, v in env.items(): for k, v in env.items():
file.write("%s=%s\n" % (k, shlex.quote(str(v)))) file.write(f"{k}={shlex.quote(str(v))}\n")
# 5. determine new or existing deployment # 5. determine new or existing deployment
# a. check for deployment lrn # a. check for deployment lrn
@ -153,8 +148,7 @@ def process_app_deployment_request(
app_deployment_lrn = app_deployment_request.attributes.deployment app_deployment_lrn = app_deployment_request.attributes.deployment
if not app_deployment_lrn.startswith(deployment_record_namespace): if not app_deployment_lrn.startswith(deployment_record_namespace):
raise Exception( raise Exception(
"Deployment LRN %s is not in a supported namespace" f"Deployment LRN {app_deployment_request.attributes.deployment} is not in a supported namespace"
% app_deployment_request.attributes.deployment
) )
deployment_record = laconic.get_record(app_deployment_lrn) deployment_record = laconic.get_record(app_deployment_lrn)
@ -165,14 +159,14 @@ def process_app_deployment_request(
# already-unique deployment id # already-unique deployment id
unique_deployment_id = hashlib.md5(fqdn.encode()).hexdigest()[:16] unique_deployment_id = hashlib.md5(fqdn.encode()).hexdigest()[:16]
deployment_config_file = os.path.join(deployment_dir, "config.env") deployment_config_file = os.path.join(deployment_dir, "config.env")
deployment_container_tag = "laconic-webapp/%s:local" % unique_deployment_id deployment_container_tag = f"laconic-webapp/{unique_deployment_id}:local"
app_image_shared_tag = f"laconic-webapp/{app.id}:local" app_image_shared_tag = f"laconic-webapp/{app.id}:local"
# b. check for deployment directory (create if necessary) # b. check for deployment directory (create if necessary)
if not os.path.exists(deployment_dir): if not os.path.exists(deployment_dir):
if deployment_record: if deployment_record:
raise Exception( raise Exception(
"Deployment record %s exists, but not deployment dir %s. " f"Deployment record {app_deployment_lrn} exists, but not deployment dir {deployment_dir}. "
"Please remove name." % (app_deployment_lrn, deployment_dir) "Please remove name."
) )
logger.log( logger.log(
f"Creating webapp deployment in: {deployment_dir} " f"Creating webapp deployment in: {deployment_dir} "
@ -198,11 +192,7 @@ def process_app_deployment_request(
) )
# 6. build container (if needed) # 6. build container (if needed)
# TODO: add a comment that explains what this code is doing (not clear to me) # TODO: add a comment that explains what this code is doing (not clear to me)
if ( if not deployment_record or deployment_record.attributes.application != app.id or force_rebuild:
not deployment_record
or deployment_record.attributes.application != app.id
or force_rebuild
):
needs_k8s_deploy = True needs_k8s_deploy = True
# check if the image already exists # check if the image already exists
shared_tag_exists = remote_image_exists(image_registry, app_image_shared_tag) shared_tag_exists = remote_image_exists(image_registry, app_image_shared_tag)
@ -224,11 +214,9 @@ def process_app_deployment_request(
# ) # )
logger.log("Tag complete") logger.log("Tag complete")
else: else:
extra_build_args = [] # TODO: pull from request extra_build_args: list[str] = [] # TODO: pull from request
logger.log(f"Building container image: {deployment_container_tag}") logger.log(f"Building container image: {deployment_container_tag}")
build_container_image( build_container_image(app, deployment_container_tag, extra_build_args, logger)
app, deployment_container_tag, extra_build_args, logger
)
logger.log("Build complete") logger.log("Build complete")
logger.log(f"Pushing container image: {deployment_container_tag}") logger.log(f"Pushing container image: {deployment_container_tag}")
push_container_image(deployment_dir, logger) push_container_image(deployment_dir, logger)
@ -287,9 +275,7 @@ def dump_known_requests(filename, requests, status="SEEN"):
@click.command() @click.command()
@click.option("--kube-config", help="Provide a config file for a k8s deployment") @click.option("--kube-config", help="Provide a config file for a k8s deployment")
@click.option( @click.option("--laconic-config", help="Provide a config file for laconicd", required=True)
"--laconic-config", help="Provide a config file for laconicd", required=True
)
@click.option( @click.option(
"--image-registry", "--image-registry",
help="Provide a container image registry url for this k8s cluster", help="Provide a container image registry url for this k8s cluster",
@ -306,9 +292,7 @@ def dump_known_requests(filename, requests, status="SEEN"):
is_flag=True, is_flag=True,
default=False, default=False,
) )
@click.option( @click.option("--state-file", help="File to store state about previously seen requests.")
"--state-file", help="File to store state about previously seen requests."
)
@click.option( @click.option(
"--only-update-state", "--only-update-state",
help="Only update the state file, don't process any requests anything.", help="Only update the state file, don't process any requests anything.",
@ -331,9 +315,7 @@ def dump_known_requests(filename, requests, status="SEEN"):
help="eg, lrn://laconic/deployments", help="eg, lrn://laconic/deployments",
required=True, required=True,
) )
@click.option( @click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True)
"--dry-run", help="Don't do anything, just report what would be done.", is_flag=True
)
@click.option( @click.option(
"--include-tags", "--include-tags",
help="Only include requests with matching tags (comma-separated).", help="Only include requests with matching tags (comma-separated).",
@ -344,17 +326,13 @@ def dump_known_requests(filename, requests, status="SEEN"):
help="Exclude requests with matching tags (comma-separated).", help="Exclude requests with matching tags (comma-separated).",
default="", default="",
) )
@click.option( @click.option("--force-rebuild", help="Rebuild even if the image already exists.", is_flag=True)
"--force-rebuild", help="Rebuild even if the image already exists.", is_flag=True
)
@click.option( @click.option(
"--recreate-on-deploy", "--recreate-on-deploy",
help="Remove and recreate deployments instead of updating them.", help="Remove and recreate deployments instead of updating them.",
is_flag=True, is_flag=True,
) )
@click.option( @click.option("--log-dir", help="Output build/deployment logs to directory.", default=None)
"--log-dir", help="Output build/deployment logs to directory.", default=None
)
@click.option( @click.option(
"--min-required-payment", "--min-required-payment",
help="Requests must have a minimum payment to be processed (in alnt)", help="Requests must have a minimum payment to be processed (in alnt)",
@ -378,9 +356,7 @@ def dump_known_requests(filename, requests, status="SEEN"):
help="The directory containing uploaded config.", help="The directory containing uploaded config.",
required=True, required=True,
) )
@click.option( @click.option("--private-key-file", help="The private key for decrypting config.", required=True)
"--private-key-file", help="The private key for decrypting config.", required=True
)
@click.option( @click.option(
"--registry-lock-file", "--registry-lock-file",
help="File path to use for registry mutex lock", help="File path to use for registry mutex lock",
@ -435,11 +411,7 @@ def command( # noqa: C901
sys.exit(2) sys.exit(2)
if not only_update_state: if not only_update_state:
if ( if not record_namespace_dns or not record_namespace_deployments or not dns_suffix:
not record_namespace_dns
or not record_namespace_deployments
or not dns_suffix
):
print( print(
"--dns-suffix, --record-namespace-dns, and " "--dns-suffix, --record-namespace-dns, and "
"--record-namespace-deployments are all required", "--record-namespace-deployments are all required",
@ -491,8 +463,7 @@ def command( # noqa: C901
if min_required_payment and not payment_address: if min_required_payment and not payment_address:
print( print(
f"Minimum payment required, but no payment address listed " f"Minimum payment required, but no payment address listed " f"for deployer: {lrn}.",
f"for deployer: {lrn}.",
file=sys.stderr, file=sys.stderr,
) )
sys.exit(2) sys.exit(2)
@ -557,26 +528,18 @@ def command( # noqa: C901
requested_name = r.attributes.dns requested_name = r.attributes.dns
if not requested_name: if not requested_name:
requested_name = generate_hostname_for_app(app) requested_name = generate_hostname_for_app(app)
main_logger.log( main_logger.log(f"Generating name {requested_name} for request {r_id}.")
"Generating name %s for request %s." % (requested_name, r_id)
)
if ( if requested_name in skipped_by_name or requested_name in requests_by_name:
requested_name in skipped_by_name main_logger.log(f"Ignoring request {r_id}, it has been superseded.")
or requested_name in requests_by_name
):
main_logger.log(
"Ignoring request %s, it has been superseded." % r_id
)
result = "SKIP" result = "SKIP"
continue continue
if skip_by_tag(r, include_tags, exclude_tags): if skip_by_tag(r, include_tags, exclude_tags):
r_tags = r.attributes.tags if r.attributes else None r_tags = r.attributes.tags if r.attributes else None
main_logger.log( main_logger.log(
"Skipping request %s, filtered by tag " f"Skipping request {r_id}, filtered by tag "
"(include %s, exclude %s, present %s)" f"(include {include_tags}, exclude {exclude_tags}, present {r_tags})"
% (r_id, include_tags, exclude_tags, r_tags)
) )
skipped_by_name[requested_name] = r skipped_by_name[requested_name] = r
result = "SKIP" result = "SKIP"
@ -584,8 +547,7 @@ def command( # noqa: C901
r_app = r.attributes.application if r.attributes else "unknown" r_app = r.attributes.application if r.attributes else "unknown"
main_logger.log( main_logger.log(
"Found pending request %s to run application %s on %s." f"Found pending request {r_id} to run application {r_app} on {requested_name}."
% (r_id, r_app, requested_name)
) )
requests_by_name[requested_name] = r requests_by_name[requested_name] = r
except Exception as e: except Exception as e:
@ -617,17 +579,14 @@ def command( # noqa: C901
requests_to_check_for_payment = [] requests_to_check_for_payment = []
for r in requests_by_name.values(): for r in requests_by_name.values():
if r.id in cancellation_requests and match_owner( if r.id in cancellation_requests and match_owner(cancellation_requests[r.id], r):
cancellation_requests[r.id], r
):
main_logger.log( main_logger.log(
f"Found deployment cancellation request for {r.id} " f"Found deployment cancellation request for {r.id} "
f"at {cancellation_requests[r.id].id}" f"at {cancellation_requests[r.id].id}"
) )
elif r.id in deployments_by_request: elif r.id in deployments_by_request:
main_logger.log( main_logger.log(
f"Found satisfied request for {r.id} " f"Found satisfied request for {r.id} " f"at {deployments_by_request[r.id].id}"
f"at {deployments_by_request[r.id].id}"
) )
else: else:
if ( if (
@ -635,8 +594,7 @@ def command( # noqa: C901
and previous_requests[r.id].get("status", "") != "RETRY" and previous_requests[r.id].get("status", "") != "RETRY"
): ):
main_logger.log( main_logger.log(
f"Skipping unsatisfied request {r.id} " f"Skipping unsatisfied request {r.id} " "because we have seen it before."
"because we have seen it before."
) )
else: else:
main_logger.log(f"Request {r.id} needs to processed.") main_logger.log(f"Request {r.id} needs to processed.")
@ -650,14 +608,10 @@ def command( # noqa: C901
main_logger.log(f"{r.id}: Auction confirmed.") main_logger.log(f"{r.id}: Auction confirmed.")
requests_to_execute.append(r) requests_to_execute.append(r)
else: else:
main_logger.log( main_logger.log(f"Skipping request {r.id}: unable to verify auction.")
f"Skipping request {r.id}: unable to verify auction."
)
dump_known_requests(state_file, [r], status="SKIP") dump_known_requests(state_file, [r], status="SKIP")
else: else:
main_logger.log( main_logger.log(f"Skipping request {r.id}: not handling requests with auction.")
f"Skipping request {r.id}: not handling requests with auction."
)
dump_known_requests(state_file, [r], status="SKIP") dump_known_requests(state_file, [r], status="SKIP")
elif min_required_payment: elif min_required_payment:
main_logger.log(f"{r.id}: Confirming payment...") main_logger.log(f"{r.id}: Confirming payment...")
@ -671,16 +625,12 @@ def command( # noqa: C901
main_logger.log(f"{r.id}: Payment confirmed.") main_logger.log(f"{r.id}: Payment confirmed.")
requests_to_execute.append(r) requests_to_execute.append(r)
else: else:
main_logger.log( main_logger.log(f"Skipping request {r.id}: unable to verify payment.")
f"Skipping request {r.id}: unable to verify payment."
)
dump_known_requests(state_file, [r], status="UNPAID") dump_known_requests(state_file, [r], status="UNPAID")
else: else:
requests_to_execute.append(r) requests_to_execute.append(r)
main_logger.log( main_logger.log(f"Found {len(requests_to_execute)} unsatisfied request(s) to process.")
"Found %d unsatisfied request(s) to process." % len(requests_to_execute)
)
if not dry_run: if not dry_run:
for r in requests_to_execute: for r in requests_to_execute:
@ -700,10 +650,8 @@ def command( # noqa: C901
if not os.path.exists(run_log_dir): if not os.path.exists(run_log_dir):
os.mkdir(run_log_dir) os.mkdir(run_log_dir)
run_log_file_path = os.path.join(run_log_dir, f"{run_id}.log") run_log_file_path = os.path.join(run_log_dir, f"{run_id}.log")
main_logger.log( main_logger.log(f"Directing deployment logs to: {run_log_file_path}")
f"Directing deployment logs to: {run_log_file_path}" run_log_file = open(run_log_file_path, "w")
)
run_log_file = open(run_log_file_path, "wt")
run_reg_client = LaconicRegistryClient( run_reg_client = LaconicRegistryClient(
laconic_config, laconic_config,
log_file=run_log_file, log_file=run_log_file,

View File

@ -12,18 +12,18 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import sys
import json import json
import sys
import click import click
from stack_orchestrator.deploy.webapp.util import ( from stack_orchestrator.deploy.webapp.util import (
AUCTION_KIND_PROVIDER,
AttrDict, AttrDict,
AuctionStatus,
LaconicRegistryClient, LaconicRegistryClient,
TimedLogger, TimedLogger,
load_known_requests, load_known_requests,
AUCTION_KIND_PROVIDER,
AuctionStatus,
) )
@ -44,16 +44,13 @@ def process_app_deployment_auction(
# Check auction kind # Check auction kind
if auction.kind != AUCTION_KIND_PROVIDER: if auction.kind != AUCTION_KIND_PROVIDER:
raise Exception( raise Exception(f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction.kind}")
f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction.kind}"
)
if current_status == "PENDING": if current_status == "PENDING":
# Skip if pending auction not in commit state # Skip if pending auction not in commit state
if auction.status != AuctionStatus.COMMIT: if auction.status != AuctionStatus.COMMIT:
logger.log( logger.log(
f"Skipping pending request, auction {auction_id} " f"Skipping pending request, auction {auction_id} " f"status: {auction.status}"
f"status: {auction.status}"
) )
return "SKIP", "" return "SKIP", ""
@ -115,9 +112,7 @@ def dump_known_auction_requests(filename, requests, status="SEEN"):
@click.command() @click.command()
@click.option( @click.option("--laconic-config", help="Provide a config file for laconicd", required=True)
"--laconic-config", help="Provide a config file for laconicd", required=True
)
@click.option( @click.option(
"--state-file", "--state-file",
help="File to store state about previously seen auction requests.", help="File to store state about previously seen auction requests.",
@ -133,9 +128,7 @@ def dump_known_auction_requests(filename, requests, status="SEEN"):
help="File path to use for registry mutex lock", help="File path to use for registry mutex lock",
default=None, default=None,
) )
@click.option( @click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True)
"--dry-run", help="Don't do anything, just report what would be done.", is_flag=True
)
@click.pass_context @click.pass_context
def command( def command(
ctx, ctx,
@ -198,8 +191,7 @@ def command(
continue continue
logger.log( logger.log(
f"Found pending auction request {r.id} for application " f"Found pending auction request {r.id} for application " f"{application}."
f"{application}."
) )
# Add requests to be processed # Add requests to be processed
@ -209,9 +201,7 @@ def command(
result_status = "ERROR" result_status = "ERROR"
logger.log(f"ERROR: examining request {r.id}: " + str(e)) logger.log(f"ERROR: examining request {r.id}: " + str(e))
finally: finally:
logger.log( logger.log(f"DONE: Examining request {r.id} with result {result_status}.")
f"DONE: Examining request {r.id} with result {result_status}."
)
if result_status in ["ERROR"]: if result_status in ["ERROR"]:
dump_known_auction_requests( dump_known_auction_requests(
state_file, state_file,

View File

@ -30,9 +30,7 @@ def fatal(msg: str):
@click.command() @click.command()
@click.option( @click.option("--laconic-config", help="Provide a config file for laconicd", required=True)
"--laconic-config", help="Provide a config file for laconicd", required=True
)
@click.option( @click.option(
"--app", "--app",
help="The LRN of the application to deploy.", help="The LRN of the application to deploy.",

View File

@ -13,28 +13,24 @@
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import base64 import base64
import click
import sys import sys
import yaml
from urllib.parse import urlparse from urllib.parse import urlparse
import click
import yaml
from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient
@click.command() @click.command()
@click.option( @click.option("--laconic-config", help="Provide a config file for laconicd", required=True)
"--laconic-config", help="Provide a config file for laconicd", required=True
)
@click.option("--api-url", help="The API URL of the deployer.", required=True) @click.option("--api-url", help="The API URL of the deployer.", required=True)
@click.option( @click.option(
"--public-key-file", "--public-key-file",
help="The public key to use. This should be a binary file.", help="The public key to use. This should be a binary file.",
required=True, required=True,
) )
@click.option( @click.option("--lrn", help="eg, lrn://laconic/deployers/my.deployer.name", required=True)
"--lrn", help="eg, lrn://laconic/deployers/my.deployer.name", required=True
)
@click.option( @click.option(
"--payment-address", "--payment-address",
help="The address to which payments should be made. " help="The address to which payments should be made. "
@ -84,9 +80,7 @@ def command( # noqa: C901
} }
if min_required_payment: if min_required_payment:
webapp_deployer_record["record"][ webapp_deployer_record["record"]["minimumPayment"] = f"{min_required_payment}alnt"
"minimumPayment"
] = f"{min_required_payment}alnt"
if dry_run: if dry_run:
yaml.dump(webapp_deployer_record, sys.stdout) yaml.dump(webapp_deployer_record, sys.stdout)

View File

@ -1,6 +1,6 @@
from functools import wraps
import os import os
import time import time
from functools import wraps
# Define default file path for the lock # Define default file path for the lock
DEFAULT_LOCK_FILE_PATH = "/tmp/registry_mutex_lock_file" DEFAULT_LOCK_FILE_PATH = "/tmp/registry_mutex_lock_file"
@ -17,7 +17,7 @@ def acquire_lock(client, lock_file_path, timeout):
try: try:
# Check if lock file exists and is potentially stale # Check if lock file exists and is potentially stale
if os.path.exists(lock_file_path): if os.path.exists(lock_file_path):
with open(lock_file_path, "r") as lock_file: with open(lock_file_path) as lock_file:
timestamp = float(lock_file.read().strip()) timestamp = float(lock_file.read().strip())
# If lock is stale, remove the lock file # If lock is stale, remove the lock file
@ -25,9 +25,7 @@ def acquire_lock(client, lock_file_path, timeout):
print(f"Stale lock detected, removing lock file {lock_file_path}") print(f"Stale lock detected, removing lock file {lock_file_path}")
os.remove(lock_file_path) os.remove(lock_file_path)
else: else:
print( print(f"Lock file {lock_file_path} exists and is recent, waiting...")
f"Lock file {lock_file_path} exists and is recent, waiting..."
)
time.sleep(LOCK_RETRY_INTERVAL) time.sleep(LOCK_RETRY_INTERVAL)
continue continue

View File

@ -12,24 +12,24 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import base64
import shutil import shutil
import sys import sys
import tempfile import tempfile
from datetime import datetime from datetime import datetime
from typing import NoReturn from typing import NoReturn
import base64
import gnupg
import click import click
import gnupg
import requests import requests
import yaml import yaml
from dotenv import dotenv_values
from stack_orchestrator.deploy.webapp.util import ( from stack_orchestrator.deploy.webapp.util import (
AUCTION_KIND_PROVIDER, AUCTION_KIND_PROVIDER,
AuctionStatus, AuctionStatus,
LaconicRegistryClient, LaconicRegistryClient,
) )
from dotenv import dotenv_values
def fatal(msg: str) -> NoReturn: def fatal(msg: str) -> NoReturn:
@ -38,9 +38,7 @@ def fatal(msg: str) -> NoReturn:
@click.command() @click.command()
@click.option( @click.option("--laconic-config", help="Provide a config file for laconicd", required=True)
"--laconic-config", help="Provide a config file for laconicd", required=True
)
@click.option( @click.option(
"--app", "--app",
help="The LRN of the application to deploy.", help="The LRN of the application to deploy.",
@ -63,9 +61,7 @@ def fatal(msg: str) -> NoReturn:
"'auto' to use the deployer's minimum required payment." "'auto' to use the deployer's minimum required payment."
), ),
) )
@click.option( @click.option("--use-payment", help="The TX id of an existing, unused payment", default=None)
"--use-payment", help="The TX id of an existing, unused payment", default=None
)
@click.option("--dns", help="the DNS name to request (default is autogenerated)") @click.option("--dns", help="the DNS name to request (default is autogenerated)")
@click.option( @click.option(
"--dry-run", "--dry-run",
@ -144,9 +140,7 @@ def command( # noqa: C901
# Check auction kind # Check auction kind
auction_kind = auction.kind if auction else None auction_kind = auction.kind if auction else None
if auction_kind != AUCTION_KIND_PROVIDER: if auction_kind != AUCTION_KIND_PROVIDER:
fatal( fatal(f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction_kind}")
f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction_kind}"
)
# Check auction status # Check auction status
auction_status = auction.status if auction else None auction_status = auction.status if auction else None
@ -163,14 +157,9 @@ def command( # noqa: C901
# Get deployer record for all the auction winners # Get deployer record for all the auction winners
for auction_winner in auction_winners: for auction_winner in auction_winners:
# TODO: Match auction winner address with provider address? # TODO: Match auction winner address with provider address?
deployer_records_by_owner = laconic.webapp_deployers( deployer_records_by_owner = laconic.webapp_deployers({"paymentAddress": auction_winner})
{"paymentAddress": auction_winner}
)
if len(deployer_records_by_owner) == 0: if len(deployer_records_by_owner) == 0:
print( print(f"WARNING: Unable to locate deployer for auction winner " f"{auction_winner}")
f"WARNING: Unable to locate deployer for auction winner "
f"{auction_winner}"
)
# Take first record with name set # Take first record with name set
target_deployer_record = deployer_records_by_owner[0] target_deployer_record = deployer_records_by_owner[0]
@ -196,9 +185,7 @@ def command( # noqa: C901
gpg = gnupg.GPG(gnupghome=tempdir) gpg = gnupg.GPG(gnupghome=tempdir)
# Import the deployer's public key # Import the deployer's public key
result = gpg.import_keys( result = gpg.import_keys(base64.b64decode(deployer_record.attributes.publicKey))
base64.b64decode(deployer_record.attributes.publicKey)
)
if 1 != result.imported: if 1 != result.imported:
fatal("Failed to import deployer's public key.") fatal("Failed to import deployer's public key.")
@ -237,15 +224,9 @@ def command( # noqa: C901
if (not deployer) and len(deployer_record.names): if (not deployer) and len(deployer_record.names):
target_deployer = deployer_record.names[0] target_deployer = deployer_record.names[0]
app_name = ( app_name = app_record.attributes.name if app_record and app_record.attributes else "unknown"
app_record.attributes.name
if app_record and app_record.attributes
else "unknown"
)
app_version = ( app_version = (
app_record.attributes.version app_record.attributes.version if app_record and app_record.attributes else "unknown"
if app_record and app_record.attributes
else "unknown"
) )
deployment_request = { deployment_request = {
"record": { "record": {
@ -273,15 +254,11 @@ def command( # noqa: C901
deployment_request["record"]["payment"] = "DRY_RUN" deployment_request["record"]["payment"] = "DRY_RUN"
elif "auto" == make_payment: elif "auto" == make_payment:
if "minimumPayment" in deployer_record.attributes: if "minimumPayment" in deployer_record.attributes:
amount = int( amount = int(deployer_record.attributes.minimumPayment.replace("alnt", ""))
deployer_record.attributes.minimumPayment.replace("alnt", "")
)
else: else:
amount = make_payment amount = make_payment
if amount: if amount:
receipt = laconic.send_tokens( receipt = laconic.send_tokens(deployer_record.attributes.paymentAddress, amount)
deployer_record.attributes.paymentAddress, amount
)
deployment_request["record"]["payment"] = receipt.tx.hash deployment_request["record"]["payment"] = receipt.tx.hash
print("Payment TX:", receipt.tx.hash) print("Payment TX:", receipt.tx.hash)
elif use_payment: elif use_payment:

View File

@ -26,12 +26,8 @@ def fatal(msg: str) -> None:
@click.command() @click.command()
@click.option( @click.option("--laconic-config", help="Provide a config file for laconicd", required=True)
"--laconic-config", help="Provide a config file for laconicd", required=True @click.option("--deployer", help="The LRN of the deployer to process this request.", required=True)
)
@click.option(
"--deployer", help="The LRN of the deployer to process this request.", required=True
)
@click.option( @click.option(
"--deployment", "--deployment",
help="Deployment record (ApplicationDeploymentRecord) id of the deployment.", help="Deployment record (ApplicationDeploymentRecord) id of the deployment.",
@ -44,9 +40,7 @@ def fatal(msg: str) -> None:
"'auto' to use the deployer's minimum required payment." "'auto' to use the deployer's minimum required payment."
), ),
) )
@click.option( @click.option("--use-payment", help="The TX id of an existing, unused payment", default=None)
"--use-payment", help="The TX id of an existing, unused payment", default=None
)
@click.option( @click.option(
"--dry-run", "--dry-run",
help="Don't publish anything, just report what would be done.", help="Don't publish anything, just report what would be done.",

View File

@ -22,6 +22,7 @@
# all or specific containers # all or specific containers
import hashlib import hashlib
import click import click
from dotenv import dotenv_values from dotenv import dotenv_values

View File

@ -21,11 +21,11 @@ import sys
import click import click
from stack_orchestrator.deploy.webapp.util import ( from stack_orchestrator.deploy.webapp.util import (
TimedLogger,
LaconicRegistryClient, LaconicRegistryClient,
TimedLogger,
confirm_payment,
match_owner, match_owner,
skip_by_tag, skip_by_tag,
confirm_payment,
) )
main_logger = TimedLogger(file=sys.stderr) main_logger = TimedLogger(file=sys.stderr)
@ -40,9 +40,7 @@ def process_app_removal_request(
delete_names, delete_names,
webapp_deployer_record, webapp_deployer_record,
): ):
deployment_record = laconic.get_record( deployment_record = laconic.get_record(app_removal_request.attributes.deployment, require=True)
app_removal_request.attributes.deployment, require=True
)
assert deployment_record is not None # require=True ensures this assert deployment_record is not None # require=True ensures this
assert deployment_record.attributes is not None assert deployment_record.attributes is not None
@ -50,12 +48,10 @@ def process_app_removal_request(
assert dns_record is not None # require=True ensures this assert dns_record is not None # require=True ensures this
assert dns_record.attributes is not None assert dns_record.attributes is not None
deployment_dir = os.path.join( deployment_dir = os.path.join(deployment_parent_dir, dns_record.attributes.name.lower())
deployment_parent_dir, dns_record.attributes.name.lower()
)
if not os.path.exists(deployment_dir): if not os.path.exists(deployment_dir):
raise Exception("Deployment directory %s does not exist." % deployment_dir) raise Exception(f"Deployment directory {deployment_dir} does not exist.")
# Check if the removal request is from the owner of the DnsRecord or # Check if the removal request is from the owner of the DnsRecord or
# deployment record. # deployment record.
@ -63,9 +59,7 @@ def process_app_removal_request(
# Or of the original deployment request. # Or of the original deployment request.
if not matched_owner and deployment_record.attributes.request: if not matched_owner and deployment_record.attributes.request:
original_request = laconic.get_record( original_request = laconic.get_record(deployment_record.attributes.request, require=True)
deployment_record.attributes.request, require=True
)
assert original_request is not None # require=True ensures this assert original_request is not None # require=True ensures this
matched_owner = match_owner(app_removal_request, original_request) matched_owner = match_owner(app_removal_request, original_request)
@ -75,8 +69,7 @@ def process_app_removal_request(
deployment_id = deployment_record.id if deployment_record else "unknown" deployment_id = deployment_record.id if deployment_record else "unknown"
request_id = app_removal_request.id if app_removal_request else "unknown" request_id = app_removal_request.id if app_removal_request else "unknown"
raise Exception( raise Exception(
"Unable to confirm ownership of deployment %s for removal request %s" f"Unable to confirm ownership of deployment {deployment_id} for removal request {request_id}"
% (deployment_id, request_id)
) )
# TODO(telackey): Call the function directly. The easiest way to build # TODO(telackey): Call the function directly. The easiest way to build
@ -124,7 +117,7 @@ def process_app_removal_request(
def load_known_requests(filename): def load_known_requests(filename):
if filename and os.path.exists(filename): if filename and os.path.exists(filename):
return json.load(open(filename, "r")) return json.load(open(filename))
return {} return {}
@ -138,9 +131,7 @@ def dump_known_requests(filename, requests):
@click.command() @click.command()
@click.option( @click.option("--laconic-config", help="Provide a config file for laconicd", required=True)
"--laconic-config", help="Provide a config file for laconicd", required=True
)
@click.option( @click.option(
"--deployment-parent-dir", "--deployment-parent-dir",
help="Create deployment directories beneath this directory", help="Create deployment directories beneath this directory",
@ -153,9 +144,7 @@ def dump_known_requests(filename, requests):
is_flag=True, is_flag=True,
default=False, default=False,
) )
@click.option( @click.option("--state-file", help="File to store state about previously seen requests.")
"--state-file", help="File to store state about previously seen requests."
)
@click.option( @click.option(
"--only-update-state", "--only-update-state",
help="Only update the state file, don't process any requests anything.", help="Only update the state file, don't process any requests anything.",
@ -166,12 +155,8 @@ def dump_known_requests(filename, requests):
help="Delete all names associated with removed deployments.", help="Delete all names associated with removed deployments.",
default=True, default=True,
) )
@click.option( @click.option("--delete-volumes/--preserve-volumes", default=True, help="delete data volumes")
"--delete-volumes/--preserve-volumes", default=True, help="delete data volumes" @click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True)
)
@click.option(
"--dry-run", help="Don't do anything, just report what would be done.", is_flag=True
)
@click.option( @click.option(
"--include-tags", "--include-tags",
help="Only include requests with matching tags (comma-separated).", help="Only include requests with matching tags (comma-separated).",
@ -245,8 +230,7 @@ def command( # noqa: C901
if min_required_payment and not payment_address: if min_required_payment and not payment_address:
print( print(
f"Minimum payment required, but no payment address listed " f"Minimum payment required, but no payment address listed " f"for deployer: {lrn}.",
f"for deployer: {lrn}.",
file=sys.stderr, file=sys.stderr,
) )
sys.exit(2) sys.exit(2)
@ -303,9 +287,7 @@ def command( # noqa: C901
continue continue
if not r.attributes.deployment: if not r.attributes.deployment:
r_id = r.id if r else "unknown" r_id = r.id if r else "unknown"
main_logger.log( main_logger.log(f"Skipping removal request {r_id} since it was a cancellation.")
f"Skipping removal request {r_id} since it was a cancellation."
)
elif r.attributes.deployment in one_per_deployment: elif r.attributes.deployment in one_per_deployment:
r_id = r.id if r else "unknown" r_id = r.id if r else "unknown"
main_logger.log(f"Skipping removal request {r_id} since it was superseded.") main_logger.log(f"Skipping removal request {r_id} since it was superseded.")
@ -323,14 +305,12 @@ def command( # noqa: C901
) )
elif skip_by_tag(r, include_tags, exclude_tags): elif skip_by_tag(r, include_tags, exclude_tags):
main_logger.log( main_logger.log(
"Skipping removal request %s, filtered by tag " f"Skipping removal request {r.id}, filtered by tag "
"(include %s, exclude %s, present %s)" f"(include {include_tags}, exclude {exclude_tags}, present {r.attributes.tags})"
% (r.id, include_tags, exclude_tags, r.attributes.tags)
) )
elif r.id in removals_by_request: elif r.id in removals_by_request:
main_logger.log( main_logger.log(
f"Found satisfied request for {r.id} " f"Found satisfied request for {r.id} " f"at {removals_by_request[r.id].id}"
f"at {removals_by_request[r.id].id}"
) )
elif r.attributes.deployment in removals_by_deployment: elif r.attributes.deployment in removals_by_deployment:
main_logger.log( main_logger.log(
@ -344,8 +324,7 @@ def command( # noqa: C901
requests_to_check_for_payment.append(r) requests_to_check_for_payment.append(r)
else: else:
main_logger.log( main_logger.log(
f"Skipping unsatisfied request {r.id} " f"Skipping unsatisfied request {r.id} " "because we have seen it before."
"because we have seen it before."
) )
except Exception as e: except Exception as e:
main_logger.log(f"ERROR examining {r.id}: {e}") main_logger.log(f"ERROR examining {r.id}: {e}")
@ -370,9 +349,7 @@ def command( # noqa: C901
else: else:
requests_to_execute = requests_to_check_for_payment requests_to_execute = requests_to_check_for_payment
main_logger.log( main_logger.log(f"Found {len(requests_to_execute)} unsatisfied request(s) to process.")
"Found %d unsatisfied request(s) to process." % len(requests_to_execute)
)
if not dry_run: if not dry_run:
for r in requests_to_execute: for r in requests_to_execute:

View File

@ -22,10 +22,10 @@ import subprocess
import sys import sys
import tempfile import tempfile
import uuid import uuid
import yaml
from enum import Enum from enum import Enum
from typing import Any, List, Optional, TextIO from typing import Any, TextIO
import yaml
from stack_orchestrator.deploy.webapp.registry_mutex import registry_mutex from stack_orchestrator.deploy.webapp.registry_mutex import registry_mutex
@ -43,17 +43,17 @@ AUCTION_KIND_PROVIDER = "provider"
class AttrDict(dict): class AttrDict(dict):
def __init__(self, *args: Any, **kwargs: Any) -> None: def __init__(self, *args: Any, **kwargs: Any) -> None:
super(AttrDict, self).__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self.__dict__ = self self.__dict__ = self
def __getattribute__(self, attr: str) -> Any: def __getattribute__(self, attr: str) -> Any:
__dict__ = super(AttrDict, self).__getattribute__("__dict__") __dict__ = super().__getattribute__("__dict__")
if attr in __dict__: if attr in __dict__:
v = super(AttrDict, self).__getattribute__(attr) v = super().__getattribute__(attr)
if isinstance(v, dict): if isinstance(v, dict):
return AttrDict(v) return AttrDict(v)
return v return v
return super(AttrDict, self).__getattribute__(attr) return super().__getattribute__(attr)
def __getattr__(self, attr: str) -> Any: def __getattr__(self, attr: str) -> Any:
# This method is called when attribute is not found # This method is called when attribute is not found
@ -62,15 +62,13 @@ class AttrDict(dict):
class TimedLogger: class TimedLogger:
def __init__(self, id: str = "", file: Optional[TextIO] = None) -> None: def __init__(self, id: str = "", file: TextIO | None = None) -> None:
self.start = datetime.datetime.now() self.start = datetime.datetime.now()
self.last = self.start self.last = self.start
self.id = id self.id = id
self.file = file self.file = file
def log( def log(self, msg: str, show_step_time: bool = True, show_total_time: bool = False) -> None:
self, msg: str, show_step_time: bool = True, show_total_time: bool = False
) -> None:
prefix = f"{datetime.datetime.utcnow()} - {self.id}" prefix = f"{datetime.datetime.utcnow()} - {self.id}"
if show_step_time: if show_step_time:
prefix += f" - {datetime.datetime.now() - self.last} (step)" prefix += f" - {datetime.datetime.now() - self.last} (step)"
@ -84,11 +82,11 @@ class TimedLogger:
def load_known_requests(filename): def load_known_requests(filename):
if filename and os.path.exists(filename): if filename and os.path.exists(filename):
return json.load(open(filename, "r")) return json.load(open(filename))
return {} return {}
def logged_cmd(log_file: Optional[TextIO], *vargs: str) -> str: def logged_cmd(log_file: TextIO | None, *vargs: str) -> str:
result = None result = None
try: try:
if log_file: if log_file:
@ -105,15 +103,14 @@ def logged_cmd(log_file: Optional[TextIO], *vargs: str) -> str:
raise err raise err
def match_owner( def match_owner(recordA: AttrDict | None, *records: AttrDict | None) -> str | None:
recordA: Optional[AttrDict], *records: Optional[AttrDict]
) -> Optional[str]:
if not recordA or not recordA.owners: if not recordA or not recordA.owners:
return None return None
for owner in recordA.owners: for owner in recordA.owners:
for otherRecord in records: for otherRecord in records:
if otherRecord and otherRecord.owners and owner in otherRecord.owners: if otherRecord and otherRecord.owners and owner in otherRecord.owners:
return owner result: str | None = owner
return result
return None return None
@ -147,9 +144,7 @@ class LaconicRegistryClient:
return self.cache["whoami"] return self.cache["whoami"]
args = ["laconic", "-c", self.config_file, "registry", "account", "get"] args = ["laconic", "-c", self.config_file, "registry", "account", "get"]
results = [ results = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r]
AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
]
if len(results): if len(results):
self.cache["whoami"] = results[0] self.cache["whoami"] = results[0]
@ -178,9 +173,7 @@ class LaconicRegistryClient:
"--address", "--address",
address, address,
] ]
results = [ results = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r]
AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
]
if len(results): if len(results):
self.cache["accounts"][address] = results[0] self.cache["accounts"][address] = results[0]
return results[0] return results[0]
@ -203,9 +196,7 @@ class LaconicRegistryClient:
"--id", "--id",
id, id,
] ]
results = [ results = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r]
AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
]
self._add_to_cache(results) self._add_to_cache(results)
if len(results): if len(results):
return results[0] return results[0]
@ -216,9 +207,7 @@ class LaconicRegistryClient:
def list_bonds(self): def list_bonds(self):
args = ["laconic", "-c", self.config_file, "registry", "bond", "list"] args = ["laconic", "-c", self.config_file, "registry", "bond", "list"]
results = [ results = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r]
AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
]
self._add_to_cache(results) self._add_to_cache(results)
return results return results
@ -232,12 +221,10 @@ class LaconicRegistryClient:
if criteria: if criteria:
for k, v in criteria.items(): for k, v in criteria.items():
args.append("--%s" % k) args.append(f"--{k}")
args.append(str(v)) args.append(str(v))
results = [ results = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r]
AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
]
# Most recent records first # Most recent records first
results.sort(key=lambda r: r.createTime or "") results.sort(key=lambda r: r.createTime or "")
@ -246,7 +233,7 @@ class LaconicRegistryClient:
return results return results
def _add_to_cache(self, records: List[AttrDict]) -> None: def _add_to_cache(self, records: list[AttrDict]) -> None:
if not records: if not records:
return return
@ -271,9 +258,7 @@ class LaconicRegistryClient:
args = ["laconic", "-c", self.config_file, "registry", "name", "resolve", name] args = ["laconic", "-c", self.config_file, "registry", "name", "resolve", name]
parsed = [ parsed = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r]
AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
]
if parsed: if parsed:
self._add_to_cache(parsed) self._add_to_cache(parsed)
return parsed[0] return parsed[0]
@ -303,9 +288,7 @@ class LaconicRegistryClient:
name_or_id, name_or_id,
] ]
parsed = [ parsed = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r]
AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
]
if len(parsed): if len(parsed):
self._add_to_cache(parsed) self._add_to_cache(parsed)
return parsed[0] return parsed[0]
@ -356,9 +339,7 @@ class LaconicRegistryClient:
results = None results = None
try: try:
results = [ results = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r]
AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
]
except: # noqa: E722 except: # noqa: E722
pass pass
@ -422,7 +403,7 @@ class LaconicRegistryClient:
record_file = open(record_fname, "w") record_file = open(record_fname, "w")
yaml.dump(record, record_file) yaml.dump(record, record_file)
record_file.close() record_file.close()
print(open(record_fname, "r").read(), file=self.log_file) print(open(record_fname).read(), file=self.log_file)
new_record_id = json.loads( new_record_id = json.loads(
logged_cmd( logged_cmd(
@ -573,10 +554,10 @@ def determine_base_container(clone_dir, app_type="webapp"):
def build_container_image( def build_container_image(
app_record: Optional[AttrDict], app_record: AttrDict | None,
tag: str, tag: str,
extra_build_args: Optional[List[str]] = None, extra_build_args: list[str] | None = None,
logger: Optional[TimedLogger] = None, logger: TimedLogger | None = None,
) -> None: ) -> None:
if app_record is None: if app_record is None:
raise ValueError("app_record cannot be None") raise ValueError("app_record cannot be None")
@ -649,9 +630,7 @@ def build_container_image(
) )
result.check_returncode() result.check_returncode()
base_container = determine_base_container( base_container = determine_base_container(clone_dir, app_record.attributes.app_type)
clone_dir, app_record.attributes.app_type
)
if logger: if logger:
logger.log("Building webapp ...") logger.log("Building webapp ...")
@ -727,14 +706,12 @@ def publish_deployment(
if not deploy_record: if not deploy_record:
deploy_ver = "0.0.1" deploy_ver = "0.0.1"
else: else:
deploy_ver = "0.0.%d" % ( deploy_ver = f"0.0.{int(deploy_record.attributes.version.split('.')[-1]) + 1}"
int(deploy_record.attributes.version.split(".")[-1]) + 1
)
if not dns_record: if not dns_record:
dns_ver = "0.0.1" dns_ver = "0.0.1"
else: else:
dns_ver = "0.0.%d" % (int(dns_record.attributes.version.split(".")[-1]) + 1) dns_ver = f"0.0.{int(dns_record.attributes.version.split('.')[-1]) + 1}"
spec = yaml.full_load(open(os.path.join(deployment_dir, "spec.yml"))) spec = yaml.full_load(open(os.path.join(deployment_dir, "spec.yml")))
fqdn = spec["network"]["http-proxy"][0]["host-name"] fqdn = spec["network"]["http-proxy"][0]["host-name"]
@ -779,13 +756,9 @@ def publish_deployment(
# Set auction or payment id from request # Set auction or payment id from request
if app_deployment_request.attributes.auction: if app_deployment_request.attributes.auction:
new_deployment_record["record"][ new_deployment_record["record"]["auction"] = app_deployment_request.attributes.auction
"auction"
] = app_deployment_request.attributes.auction
elif app_deployment_request.attributes.payment: elif app_deployment_request.attributes.payment:
new_deployment_record["record"][ new_deployment_record["record"]["payment"] = app_deployment_request.attributes.payment
"payment"
] = app_deployment_request.attributes.payment
if webapp_deployer_record: if webapp_deployer_record:
new_deployment_record["record"]["deployer"] = webapp_deployer_record.names[0] new_deployment_record["record"]["deployer"] = webapp_deployer_record.names[0]
@ -799,9 +772,7 @@ def publish_deployment(
def hostname_for_deployment_request(app_deployment_request, laconic): def hostname_for_deployment_request(app_deployment_request, laconic):
dns_name = app_deployment_request.attributes.dns dns_name = app_deployment_request.attributes.dns
if not dns_name: if not dns_name:
app = laconic.get_record( app = laconic.get_record(app_deployment_request.attributes.application, require=True)
app_deployment_request.attributes.application, require=True
)
dns_name = generate_hostname_for_app(app) dns_name = generate_hostname_for_app(app)
elif dns_name.startswith("lrn://"): elif dns_name.startswith("lrn://"):
record = laconic.get_record(dns_name, require=True) record = laconic.get_record(dns_name, require=True)
@ -818,7 +789,7 @@ def generate_hostname_for_app(app):
m.update(app.attributes.repository[0].encode()) m.update(app.attributes.repository[0].encode())
else: else:
m.update(app.attributes.repository.encode()) m.update(app.attributes.repository.encode())
return "%s-%s" % (last_part, m.hexdigest()[0:10]) return f"{last_part}-{m.hexdigest()[0:10]}"
def skip_by_tag(r, include_tags, exclude_tags): def skip_by_tag(r, include_tags, exclude_tags):
@ -881,16 +852,13 @@ def confirm_payment(
pay_denom = "".join([i for i in tx_amount if not i.isdigit()]) pay_denom = "".join([i for i in tx_amount if not i.isdigit()])
if pay_denom != "alnt": if pay_denom != "alnt":
logger.log( logger.log(
f"{record.id}: {pay_denom} in tx {tx.hash} is not an expected " f"{record.id}: {pay_denom} in tx {tx.hash} is not an expected " "payment denomination"
"payment denomination"
) )
return False return False
pay_amount = int("".join([i for i in tx_amount if i.isdigit()]) or "0") pay_amount = int("".join([i for i in tx_amount if i.isdigit()]) or "0")
if pay_amount < min_amount: if pay_amount < min_amount:
logger.log( logger.log(f"{record.id}: payment amount {tx.amount} is less than minimum {min_amount}")
f"{record.id}: payment amount {tx.amount} is less than minimum {min_amount}"
)
return False return False
# Check if the payment was already used on a deployment # Check if the payment was already used on a deployment
@ -914,9 +882,7 @@ def confirm_payment(
{"deployer": record.attributes.deployer, "payment": tx.hash}, all=True {"deployer": record.attributes.deployer, "payment": tx.hash}, all=True
) )
if len(used): if len(used):
logger.log( logger.log(f"{record.id}: payment {tx.hash} already used on deployment removal {used}")
f"{record.id}: payment {tx.hash} already used on deployment removal {used}"
)
return False return False
return True return True
@ -940,9 +906,7 @@ def confirm_auction(
# Cross check app against application in the auction record # Cross check app against application in the auction record
requested_app = laconic.get_record(record.attributes.application, require=True) requested_app = laconic.get_record(record.attributes.application, require=True)
auction_app = laconic.get_record( auction_app = laconic.get_record(auction_records_by_id[0].attributes.application, require=True)
auction_records_by_id[0].attributes.application, require=True
)
requested_app_id = requested_app.id if requested_app else None requested_app_id = requested_app.id if requested_app else None
auction_app_id = auction_app.id if auction_app else None auction_app_id = auction_app.id if auction_app else None
if requested_app_id != auction_app_id: if requested_app_id != auction_app_id:

View File

@ -15,30 +15,24 @@
import click import click
from stack_orchestrator import opts, update, version
from stack_orchestrator.build import build_containers, build_npms, build_webapp, fetch_containers
from stack_orchestrator.command_types import CommandOptions from stack_orchestrator.command_types import CommandOptions
from stack_orchestrator.repos import setup_repositories from stack_orchestrator.deploy import deploy, deployment
from stack_orchestrator.repos import fetch_stack
from stack_orchestrator.build import build_containers, fetch_containers
from stack_orchestrator.build import build_npms
from stack_orchestrator.build import build_webapp
from stack_orchestrator.deploy.webapp import ( from stack_orchestrator.deploy.webapp import (
run_webapp,
deploy_webapp, deploy_webapp,
deploy_webapp_from_registry, deploy_webapp_from_registry,
undeploy_webapp_from_registry,
publish_webapp_deployer,
publish_deployment_auction,
handle_deployment_auction, handle_deployment_auction,
publish_deployment_auction,
publish_webapp_deployer,
request_webapp_deployment, request_webapp_deployment,
request_webapp_undeployment, request_webapp_undeployment,
run_webapp,
undeploy_webapp_from_registry,
) )
from stack_orchestrator.deploy import deploy from stack_orchestrator.repos import fetch_stack, setup_repositories
from stack_orchestrator import version
from stack_orchestrator.deploy import deployment
from stack_orchestrator import opts
from stack_orchestrator import update
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"]}
@click.group(context_settings=CONTEXT_SETTINGS) @click.group(context_settings=CONTEXT_SETTINGS)

View File

@ -17,9 +17,9 @@
# CERC_REPO_BASE_DIR defaults to ~/cerc # CERC_REPO_BASE_DIR defaults to ~/cerc
import click
import os import os
import click
from decouple import config from decouple import config
from git import exc from git import exc
@ -36,9 +36,7 @@ from stack_orchestrator.util import error_exit
@click.pass_context @click.pass_context
def command(ctx, stack_locator, git_ssh, check_only, pull): def command(ctx, stack_locator, git_ssh, check_only, pull):
"""Optionally resolve then git clone a repository with stack definitions.""" """Optionally resolve then git clone a repository with stack definitions."""
dev_root_path = os.path.expanduser( dev_root_path = os.path.expanduser(str(config("CERC_REPO_BASE_DIR", default="~/cerc")))
str(config("CERC_REPO_BASE_DIR", default="~/cerc"))
)
if not opts.o.quiet: if not opts.o.quiet:
print(f"Dev Root is: {dev_root_path}") print(f"Dev Root is: {dev_root_path}")
try: try:

View File

@ -16,20 +16,22 @@
# env vars: # env vars:
# CERC_REPO_BASE_DIR defaults to ~/cerc # CERC_REPO_BASE_DIR defaults to ~/cerc
import importlib.resources
import os import os
import sys import sys
from decouple import config
import git
from git.exc import GitCommandError, InvalidGitRepositoryError
from typing import Any from typing import Any
from tqdm import tqdm
import click import click
import importlib.resources import git
from decouple import config
from git.exc import GitCommandError, InvalidGitRepositoryError
from tqdm import tqdm
from stack_orchestrator.opts import opts from stack_orchestrator.opts import opts
from stack_orchestrator.util import ( from stack_orchestrator.util import (
error_exit,
get_parsed_stack_config, get_parsed_stack_config,
include_exclude_check, include_exclude_check,
error_exit,
warn_exit, warn_exit,
) )
@ -86,48 +88,38 @@ def _get_repo_current_branch_or_tag(full_filesystem_repo_path):
current_repo_branch_or_tag = "***UNDETERMINED***" current_repo_branch_or_tag = "***UNDETERMINED***"
is_branch = False is_branch = False
try: try:
current_repo_branch_or_tag = git.Repo( current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).active_branch.name
full_filesystem_repo_path
).active_branch.name
is_branch = True is_branch = True
except TypeError: except TypeError:
# This means that the current ref is not a branch, so possibly a tag # This means that the current ref is not a branch, so possibly a tag
# Let's try to get the tag # Let's try to get the tag
try: try:
current_repo_branch_or_tag = git.Repo( current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).git.describe(
full_filesystem_repo_path "--tags", "--exact-match"
).git.describe("--tags", "--exact-match") )
# Note that git is asymmetric -- the tag you told it to check out # Note that git is asymmetric -- the tag you told it to check out
# may not be the one you get back here (if there are multiple tags # may not be the one you get back here (if there are multiple tags
# associated with the same commit) # associated with the same commit)
except GitCommandError: except GitCommandError:
# If there is no matching branch or tag checked out, just use the current # If there is no matching branch or tag checked out, just use the current
# SHA # SHA
current_repo_branch_or_tag = ( current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).commit("HEAD").hexsha
git.Repo(full_filesystem_repo_path).commit("HEAD").hexsha
)
return current_repo_branch_or_tag, is_branch return current_repo_branch_or_tag, is_branch
# TODO: fix the messy arg list here # TODO: fix the messy arg list here
def process_repo( def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo
):
if opts.o.verbose: if opts.o.verbose:
print(f"Processing repo: {fully_qualified_repo}") print(f"Processing repo: {fully_qualified_repo}")
repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo) repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
git_ssh_prefix = f"git@{repo_host}:" git_ssh_prefix = f"git@{repo_host}:"
git_http_prefix = f"https://{repo_host}/" git_http_prefix = f"https://{repo_host}/"
full_github_repo_path = ( full_github_repo_path = f"{git_ssh_prefix if git_ssh else git_http_prefix}{repo_path}"
f"{git_ssh_prefix if git_ssh else git_http_prefix}{repo_path}"
)
repoName = repo_path.split("/")[-1] repoName = repo_path.split("/")[-1]
full_filesystem_repo_path = os.path.join(dev_root_path, repoName) full_filesystem_repo_path = os.path.join(dev_root_path, repoName)
is_present = os.path.isdir(full_filesystem_repo_path) is_present = os.path.isdir(full_filesystem_repo_path)
(current_repo_branch_or_tag, is_branch) = ( (current_repo_branch_or_tag, is_branch) = (
_get_repo_current_branch_or_tag(full_filesystem_repo_path) _get_repo_current_branch_or_tag(full_filesystem_repo_path) if is_present else (None, None)
if is_present
else (None, None)
) )
if not opts.o.quiet: if not opts.o.quiet:
present_text = ( present_text = (
@ -140,10 +132,7 @@ def process_repo(
# Quick check that it's actually a repo # Quick check that it's actually a repo
if is_present: if is_present:
if not is_git_repo(full_filesystem_repo_path): if not is_git_repo(full_filesystem_repo_path):
print( print(f"Error: {full_filesystem_repo_path} does not contain " "a valid git repository")
f"Error: {full_filesystem_repo_path} does not contain "
"a valid git repository"
)
sys.exit(1) sys.exit(1)
else: else:
if pull: if pull:
@ -190,8 +179,7 @@ def process_repo(
if branch_to_checkout: if branch_to_checkout:
if current_repo_branch_or_tag is None or ( if current_repo_branch_or_tag is None or (
current_repo_branch_or_tag current_repo_branch_or_tag and (current_repo_branch_or_tag != branch_to_checkout)
and (current_repo_branch_or_tag != branch_to_checkout)
): ):
if not opts.o.quiet: if not opts.o.quiet:
print(f"switching to branch {branch_to_checkout} in repo {repo_path}") print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
@ -245,14 +233,9 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
if local_stack: if local_stack:
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")] dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
print( print(f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: " f"{dev_root_path}")
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
f"{dev_root_path}"
)
else: else:
dev_root_path = os.path.expanduser( dev_root_path = os.path.expanduser(str(config("CERC_REPO_BASE_DIR", default="~/cerc")))
str(config("CERC_REPO_BASE_DIR", default="~/cerc"))
)
if not quiet: if not quiet:
print(f"Dev Root is: {dev_root_path}") print(f"Dev Root is: {dev_root_path}")
@ -265,9 +248,7 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
# See: https://stackoverflow.com/a/20885799/1701505 # See: https://stackoverflow.com/a/20885799/1701505
from stack_orchestrator import data from stack_orchestrator import data
with importlib.resources.open_text( with importlib.resources.open_text(data, "repository-list.txt") as repository_list_file:
data, "repository-list.txt"
) as repository_list_file:
all_repos = repository_list_file.read().splitlines() all_repos = repository_list_file.read().splitlines()
repos_in_scope = [] repos_in_scope = []

View File

@ -13,16 +13,18 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import click
import datetime import datetime
import filecmp import filecmp
import os import os
from pathlib import Path
import requests
import sys
import stat
import shutil import shutil
import stat
import sys
from pathlib import Path
import click
import requests
import validators import validators
from stack_orchestrator.util import get_yaml from stack_orchestrator.util import get_yaml
@ -40,9 +42,7 @@ def _error_exit(s: str):
# Note at present this probably won't work on non-Unix based OSes like Windows # Note at present this probably won't work on non-Unix based OSes like Windows
@click.command() @click.command()
@click.option( @click.option("--check-only", is_flag=True, default=False, help="only check, don't update")
"--check-only", is_flag=True, default=False, help="only check, don't update"
)
@click.pass_context @click.pass_context
def command(ctx, check_only): def command(ctx, check_only):
"""update shiv binary from a distribution url""" """update shiv binary from a distribution url"""
@ -52,7 +52,7 @@ def command(ctx, check_only):
if not config_file_path.exists(): if not config_file_path.exists():
_error_exit(f"Error: Config file: {config_file_path} not found") _error_exit(f"Error: Config file: {config_file_path} not found")
yaml = get_yaml() yaml = get_yaml()
config = yaml.load(open(config_file_path, "r")) config = yaml.load(open(config_file_path))
if "distribution-url" not in config: if "distribution-url" not in config:
_error_exit(f"Error: {config_key} not defined in {config_file_path}") _error_exit(f"Error: {config_key} not defined in {config_file_path}")
distribution_url = config[config_key] distribution_url = config[config_key]
@ -61,9 +61,7 @@ def command(ctx, check_only):
_error_exit(f"ERROR: distribution url: {distribution_url} is not valid") _error_exit(f"ERROR: distribution url: {distribution_url} is not valid")
# Figure out the filename for ourselves # Figure out the filename for ourselves
shiv_binary_path = Path(sys.argv[0]) shiv_binary_path = Path(sys.argv[0])
timestamp_filename = ( timestamp_filename = f"laconic-so-download-{datetime.datetime.now().strftime('%y%m%d-%H%M%S')}"
f"laconic-so-download-{datetime.datetime.now().strftime('%y%m%d-%H%M%S')}"
)
temp_download_path = shiv_binary_path.parent.joinpath(timestamp_filename) temp_download_path = shiv_binary_path.parent.joinpath(timestamp_filename)
# Download the file to a temp filename # Download the file to a temp filename
if ctx.obj.verbose: if ctx.obj.verbose:

View File

@ -13,14 +13,17 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
from decouple import config
import os.path import os.path
import sys import sys
import ruamel.yaml from collections.abc import Mapping
from pathlib import Path from pathlib import Path
from typing import NoReturn
import ruamel.yaml
from decouple import config
from dotenv import dotenv_values from dotenv import dotenv_values
from typing import Mapping, NoReturn, Optional, Set, List
from stack_orchestrator.constants import stack_file_name, deployment_file_name from stack_orchestrator.constants import deployment_file_name, stack_file_name
def include_exclude_check(s, include, exclude): def include_exclude_check(s, include, exclude):
@ -50,14 +53,9 @@ def get_dev_root_path(ctx):
if ctx and ctx.local_stack: if ctx and ctx.local_stack:
# TODO: This code probably doesn't work # TODO: This code probably doesn't work
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")] dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
print( print(f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: " f"{dev_root_path}")
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
f"{dev_root_path}"
)
else: else:
dev_root_path = os.path.expanduser( dev_root_path = os.path.expanduser(str(config("CERC_REPO_BASE_DIR", default="~/cerc")))
str(config("CERC_REPO_BASE_DIR", default="~/cerc"))
)
return dev_root_path return dev_root_path
@ -65,7 +63,7 @@ def get_dev_root_path(ctx):
def get_parsed_stack_config(stack): def get_parsed_stack_config(stack):
stack_file_path = get_stack_path(stack).joinpath(stack_file_name) stack_file_path = get_stack_path(stack).joinpath(stack_file_name)
if stack_file_path.exists(): if stack_file_path.exists():
return get_yaml().load(open(stack_file_path, "r")) return get_yaml().load(open(stack_file_path))
# We try here to generate a useful diagnostic error # We try here to generate a useful diagnostic error
# First check if the stack directory is present # First check if the stack directory is present
if stack_file_path.parent.exists(): if stack_file_path.parent.exists():
@ -101,10 +99,10 @@ def get_job_list(parsed_stack):
return result return result
def get_plugin_code_paths(stack) -> List[Path]: def get_plugin_code_paths(stack) -> list[Path]:
parsed_stack = get_parsed_stack_config(stack) parsed_stack = get_parsed_stack_config(stack)
pods = parsed_stack["pods"] pods = parsed_stack["pods"]
result: Set[Path] = set() result: set[Path] = set()
for pod in pods: for pod in pods:
if type(pod) is str: if type(pod) is str:
result.add(get_stack_path(stack)) result.add(get_stack_path(stack))
@ -191,7 +189,7 @@ def get_job_file_path(stack, parsed_stack, job_name: str):
def get_pod_script_paths(parsed_stack, pod_name: str): def get_pod_script_paths(parsed_stack, pod_name: str):
pods = parsed_stack["pods"] pods = parsed_stack["pods"]
result = [] result = []
if not type(pods[0]) is str: if type(pods[0]) is not str:
for pod in pods: for pod in pods:
if pod["name"] == pod_name: if pod["name"] == pod_name:
pod_root_dir = os.path.join( pod_root_dir = os.path.join(
@ -243,7 +241,7 @@ def get_k8s_dir():
def get_parsed_deployment_spec(spec_file): def get_parsed_deployment_spec(spec_file):
spec_file_path = Path(spec_file) spec_file_path = Path(spec_file)
try: try:
return get_yaml().load(open(spec_file_path, "r")) return get_yaml().load(open(spec_file_path))
except FileNotFoundError as error: except FileNotFoundError as error:
# We try here to generate a useful diagnostic error # We try here to generate a useful diagnostic error
print(f"Error: spec file: {spec_file_path} does not exist") print(f"Error: spec file: {spec_file_path} does not exist")
@ -293,5 +291,6 @@ def warn_exit(s) -> NoReturn:
sys.exit(0) sys.exit(0)
def env_var_map_from_file(file: Path) -> Mapping[str, Optional[str]]: def env_var_map_from_file(file: Path) -> Mapping[str, str | None]:
return dotenv_values(file) result: Mapping[str, str | None] = dotenv_values(file)
return result

View File

@ -13,8 +13,9 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
from importlib import metadata, resources
import click import click
from importlib import resources, metadata
@click.command() @click.command()