2023-09-10 19:28:26 +00:00
|
|
|
# Copyright © 2022, 2023 Vulcanize
|
2023-06-27 22:58:41 +00:00
|
|
|
|
|
|
|
|
# This program is free software: you can redistribute it and/or modify
|
|
|
|
|
# it under the terms of the GNU Affero General Public License as published by
|
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
|
# (at your option) any later version.
|
|
|
|
|
|
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
# GNU Affero General Public License for more details.
|
|
|
|
|
|
|
|
|
|
# You should have received a copy of the GNU Affero General Public License
|
|
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
|
|
import click
|
2023-07-24 02:54:05 +00:00
|
|
|
from importlib import util
|
2026-02-03 17:25:33 +00:00
|
|
|
import json
|
2023-06-27 22:58:41 +00:00
|
|
|
import os
|
2026-02-03 05:55:14 +00:00
|
|
|
import re
|
|
|
|
|
import base64
|
2023-06-27 22:58:41 +00:00
|
|
|
from pathlib import Path
|
2026-02-01 00:05:27 +00:00
|
|
|
from typing import List, Optional
|
2023-09-04 18:14:05 +00:00
|
|
|
import random
|
2025-10-17 15:21:23 +00:00
|
|
|
from shutil import copy, copyfile, copytree, rmtree
|
2023-12-06 05:56:58 +00:00
|
|
|
from secrets import token_hex
|
2026-03-16 08:01:11 +00:00
|
|
|
import subprocess
|
2023-06-27 22:58:41 +00:00
|
|
|
import sys
|
2025-10-17 15:21:23 +00:00
|
|
|
import filecmp
|
|
|
|
|
import tempfile
|
|
|
|
|
|
2023-11-20 16:12:57 +00:00
|
|
|
from stack_orchestrator import constants
|
2026-03-16 08:01:11 +00:00
|
|
|
from stack_orchestrator.ids import generate_id
|
2023-11-28 05:02:16 +00:00
|
|
|
from stack_orchestrator.opts import opts
|
2026-01-22 01:58:31 +00:00
|
|
|
from stack_orchestrator.util import (
|
|
|
|
|
get_stack_path,
|
|
|
|
|
get_parsed_deployment_spec,
|
|
|
|
|
get_parsed_stack_config,
|
|
|
|
|
global_options,
|
|
|
|
|
get_yaml,
|
|
|
|
|
get_pod_list,
|
|
|
|
|
get_pod_file_path,
|
|
|
|
|
pod_has_scripts,
|
|
|
|
|
get_pod_script_paths,
|
|
|
|
|
get_plugin_code_paths,
|
|
|
|
|
error_exit,
|
|
|
|
|
env_var_map_from_file,
|
|
|
|
|
resolve_config_dir,
|
|
|
|
|
get_job_list,
|
|
|
|
|
get_job_file_path,
|
|
|
|
|
)
|
2024-02-14 21:45:01 +00:00
|
|
|
from stack_orchestrator.deploy.spec import Spec
|
2023-11-08 08:11:00 +00:00
|
|
|
from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
|
feat(k8s): auto-ConfigMap for file-level host-path compose volumes
File-level host-path compose volumes (e.g. `../config/foo.sh:/opt/foo.sh`)
were synthesized into a kind extraMount + hostPath PV chain with a
sanitized containerPath (`/mnt/host-path-<sanitized>`). The sanitized
name is derived from the compose volume source and is identical across
deployments of the same stack, so two deployments sharing a cluster
collided at the containerPath — kind only honors the first deployment's
bind, subsequent deployments' pods silently read the first's content.
The same code path was also broken on real k8s, which has no way to
populate `/mnt/host-path-*` on worker nodes.
File-level compose binds are conceptually k8s ConfigMaps. The snowball
stack already uses the ConfigMap-backed named-volume pattern by hand.
Make that automatic at the k8s object-generation layer, without
touching deployment-dir compose or spec files.
Behavior at deploy create (validation only, no file mutation):
- :rw on a host-path bind -> DeployerException (use a named
volume for writable data)
- Directory with subdirectories -> DeployerException (embed in image,
split into configmaps, or use
initContainer)
- Directory or file > ~700 KiB -> DeployerException (ConfigMap budget)
- File, or flat small directory -> accepted, handled at deploy start
Behavior at deploy start:
- cluster_info.get_configmaps() additionally walks pod + job compose
volumes and emits a V1ConfigMap per host-path bind (deduped by
sanitized name across all pods/services). Content read from
{deployment_dir}/config/<pod>/<file> (already populated by
_copy_extra_config_dirs).
- volumes_for_pod_files emits V1ConfigMapVolumeSource instead of
V1HostPathVolumeSource for host-path binds.
- volume_mounts_for_service stats the source and sets V1VolumeMount
sub_path to the filename when source is a regular file — single-key
ConfigMaps land as files, whole-dir ConfigMaps land as directories.
- _generate_kind_mounts no longer emits `/mnt/host-path-*` extraMounts
for these binds (the ConfigMap path bypasses kind node FS entirely).
Deployment dir layout is unchanged. Compose files, spec.yml, and
{deployment_dir}/config/<pod>/ remain exactly as today — trivially
diffable against stack source, no synthetic volume names. ConfigMaps
are visible only in k8s (kubectl get cm -n <ns>).
The existing `/mnt/host-path-*` skip in check_mounts_compatible is
retained as a transition tolerance for deployments created before
this change.
Updates:
- deployment_create: _validate_host_path_mounts() called per pod/job
in the create loops; 700 KiB ConfigMap budget (accounts for base64
+ metadata overhead)
- helpers: _generate_kind_mounts skips host-path entries;
volumes_for_pod_files emits ConfigMap-backed V1Volume;
volume_mounts_for_service takes optional deployment_dir and
auto-sets sub_path for single-file sources
- cluster_info: new _host_path_bind_configmaps() walked from
get_configmaps(); volume_mounts_for_service call passes
deployment_dir from spec.file_path
- docs: document the behavior and the rejected shapes in
deployment_patterns.md
- tests: k8s-deploy asserts the host-path ConfigMaps exist,
compose/spec unchanged, and no `/mnt/host-path-*` extraMounts
Refs: so-b86
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-20 13:13:43 +00:00
|
|
|
from stack_orchestrator.deploy.deployer import DeployerException
|
2023-11-07 07:06:55 +00:00
|
|
|
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
|
2023-11-08 08:11:00 +00:00
|
|
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
feat(k8s): auto-ConfigMap for file-level host-path compose volumes
File-level host-path compose volumes (e.g. `../config/foo.sh:/opt/foo.sh`)
were synthesized into a kind extraMount + hostPath PV chain with a
sanitized containerPath (`/mnt/host-path-<sanitized>`). The sanitized
name is derived from the compose volume source and is identical across
deployments of the same stack, so two deployments sharing a cluster
collided at the containerPath — kind only honors the first deployment's
bind, subsequent deployments' pods silently read the first's content.
The same code path was also broken on real k8s, which has no way to
populate `/mnt/host-path-*` on worker nodes.
File-level compose binds are conceptually k8s ConfigMaps. The snowball
stack already uses the ConfigMap-backed named-volume pattern by hand.
Make that automatic at the k8s object-generation layer, without
touching deployment-dir compose or spec files.
Behavior at deploy create (validation only, no file mutation):
- :rw on a host-path bind -> DeployerException (use a named
volume for writable data)
- Directory with subdirectories -> DeployerException (embed in image,
split into configmaps, or use
initContainer)
- Directory or file > ~700 KiB -> DeployerException (ConfigMap budget)
- File, or flat small directory -> accepted, handled at deploy start
Behavior at deploy start:
- cluster_info.get_configmaps() additionally walks pod + job compose
volumes and emits a V1ConfigMap per host-path bind (deduped by
sanitized name across all pods/services). Content read from
{deployment_dir}/config/<pod>/<file> (already populated by
_copy_extra_config_dirs).
- volumes_for_pod_files emits V1ConfigMapVolumeSource instead of
V1HostPathVolumeSource for host-path binds.
- volume_mounts_for_service stats the source and sets V1VolumeMount
sub_path to the filename when source is a regular file — single-key
ConfigMaps land as files, whole-dir ConfigMaps land as directories.
- _generate_kind_mounts no longer emits `/mnt/host-path-*` extraMounts
for these binds (the ConfigMap path bypasses kind node FS entirely).
Deployment dir layout is unchanged. Compose files, spec.yml, and
{deployment_dir}/config/<pod>/ remain exactly as today — trivially
diffable against stack source, no synthetic volume names. ConfigMaps
are visible only in k8s (kubectl get cm -n <ns>).
The existing `/mnt/host-path-*` skip in check_mounts_compatible is
retained as a transition tolerance for deployments created before
this change.
Updates:
- deployment_create: _validate_host_path_mounts() called per pod/job
in the create loops; 700 KiB ConfigMap budget (accounts for base64
+ metadata overhead)
- helpers: _generate_kind_mounts skips host-path entries;
volumes_for_pod_files emits ConfigMap-backed V1Volume;
volume_mounts_for_service takes optional deployment_dir and
auto-sets sub_path for single-file sources
- cluster_info: new _host_path_bind_configmaps() walked from
get_configmaps(); volume_mounts_for_service call passes
deployment_dir from spec.file_path
- docs: document the behavior and the rejected shapes in
deployment_patterns.md
- tests: k8s-deploy asserts the host-path ConfigMaps exist,
compose/spec unchanged, and no `/mnt/host-path-*` extraMounts
Refs: so-b86
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-20 13:13:43 +00:00
|
|
|
from stack_orchestrator.deploy.k8s.helpers import is_host_path_mount
|
2023-07-25 16:16:19 +00:00
|
|
|
|
2023-06-27 22:58:41 +00:00
|
|
|
|
|
|
|
|
def _make_default_deployment_dir():
|
2023-11-20 16:12:57 +00:00
|
|
|
return Path("deployment-001")
|
2023-06-27 22:58:41 +00:00
|
|
|
|
2023-08-17 19:49:56 +00:00
|
|
|
|
2023-08-11 20:25:54 +00:00
|
|
|
def _get_ports(stack):
    """Collect the compose-declared ports for every service in the stack.

    Returns a dict mapping service name -> list of port specs, with each
    port normalized to a string.
    """
    result = {}
    stack_config = get_parsed_stack_config(stack)
    pod_names = get_pod_list(stack_config)
    yaml = get_yaml()
    for pod_name in pod_names:
        pod_path = get_pod_file_path(stack, stack_config, pod_name)
        if pod_path is None:
            continue
        pod_config = yaml.load(open(pod_path, "r"))
        if "services" in pod_config:
            for svc_name, svc_config in pod_config["services"].items():
                if "ports" in svc_config:
                    # Compose allows numeric or string port entries;
                    # normalize everything to strings.
                    result[svc_name] = [str(p) for p in svc_config["ports"]]
    return result
|
2023-06-27 22:58:41 +00:00
|
|
|
|
2023-08-17 19:49:56 +00:00
|
|
|
|
2023-06-27 22:58:41 +00:00
|
|
|
def _get_named_volumes(stack):
    """Parse the stack's compose files and classify named volumes.

    Returns {"rw": [...], "ro": [...]}. A volume lands in "ro" only when
    a read-only usage is seen before any writable one; a writable usage
    always adds it to "rw".
    """
    named_volumes = {"rw": [], "ro": []}
    stack_config = get_parsed_stack_config(stack)
    pod_names = get_pod_list(stack_config)
    yaml = get_yaml()

    def usages_of(pod_config, volume_name):
        # Map service name -> mount info for every use of volume_name.
        usages = {}
        if "services" in pod_config:
            for svc_name, svc_config in pod_config["services"].items():
                if "volumes" not in svc_config:
                    continue
                for entry in svc_config["volumes"]:
                    fields = entry.split(":")
                    if fields[0] == volume_name:
                        usages[svc_name] = {
                            "volume": fields[0],
                            "mount": fields[1],
                            "options": fields[2] if len(fields) == 3 else None,
                        }
        return usages

    for pod_name in pod_names:
        pod_path = get_pod_file_path(stack, stack_config, pod_name)
        if pod_path is None:
            continue
        pod_config = yaml.load(open(pod_path, "r"))
        if "volumes" not in pod_config:
            continue
        for volume_name in pod_config["volumes"].keys():
            for usage in usages_of(pod_config, volume_name).values():
                if usage["options"] == "ro":
                    if (
                        usage["volume"] not in named_volumes["rw"]
                        and usage["volume"] not in named_volumes["ro"]
                    ):
                        named_volumes["ro"].append(usage["volume"])
                elif usage["volume"] not in named_volumes["rw"]:
                    named_volumes["rw"].append(usage["volume"])

    return named_volumes
|
|
|
|
|
|
|
|
|
|
|
2023-06-28 03:18:04 +00:00
|
|
|
# If we're mounting a volume from a relative path, then we
|
|
|
|
|
# assume the directory doesn't exist yet and create it
|
|
|
|
|
# so the deployment will start
|
|
|
|
|
# Also warn if the path is absolute and doesn't exist
|
|
|
|
|
def _create_bind_dir_if_relative(volume, path_string, compose_dir):
|
2026-04-14 12:03:47 +00:00
|
|
|
path = Path(os.path.expanduser(path_string))
|
2023-06-28 03:18:04 +00:00
|
|
|
if not path.is_absolute():
|
|
|
|
|
absolute_path = Path(compose_dir).parent.joinpath(path)
|
|
|
|
|
absolute_path.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
else:
|
|
|
|
|
if not path.exists():
|
2026-01-22 01:58:31 +00:00
|
|
|
print(
|
|
|
|
|
f"WARNING: mount path for volume {volume} does not exist: {path_string}"
|
|
|
|
|
)
|
2023-06-28 03:18:04 +00:00
|
|
|
|
|
|
|
|
|
2026-01-22 01:58:31 +00:00
|
|
|
# See:
|
|
|
|
|
# https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
|
2023-06-28 03:18:04 +00:00
|
|
|
def _fixup_pod_file(pod, spec, compose_dir):
    """Mutate a parsed pod (compose) file in place to match the deployment spec.

    Three fixups are applied:
    - volumes: for each pod volume configured in the spec, ensure a
      relative bind dir exists; on docker deployments rewrite the volume
      as a local-driver bind mount onto the spec-supplied path.
    - configmaps: on k8s deployments, ensure the backing dirs exist;
      other deployment types only get a warning.
    - ports: replace each service's port list with the spec's mapping.

    Args:
        pod: parsed pod/compose file (dict) -- modified in place.
        spec: deployment Spec (dict-like, with is_docker_deployment() /
            is_kubernetes_deployment() predicates).
        compose_dir: directory the compose files live in; relative bind
            paths resolve against its parent (see _create_bind_dir_if_relative).
    """
    deployment_type = spec[constants.deploy_to_key]
    # Fix up volumes
    if "volumes" in spec:
        spec_volumes = spec["volumes"]
        if "volumes" in pod:
            pod_volumes = pod["volumes"]
            for volume in pod_volumes.keys():
                if volume in spec_volumes:
                    volume_spec = spec_volumes[volume]
                    if volume_spec:
                        # Non-absolute spec paths get a leading "." --
                        # presumably spec paths look like "./data/x" so the
                        # result ("../data/x") is compose-dir relative and
                        # matches where _create_bind_dir_if_relative makes
                        # the dir -- TODO confirm spec path shape.
                        volume_spec_fixedup = (
                            volume_spec
                            if Path(volume_spec).is_absolute()
                            else f".{volume_spec}"
                        )
                        _create_bind_dir_if_relative(volume, volume_spec, compose_dir)
                        # this is Docker specific
                        if spec.is_docker_deployment():
                            new_volume_spec = {
                                "driver": "local",
                                "driver_opts": {
                                    "type": "none",
                                    "device": volume_spec_fixedup,
                                    "o": "bind",
                                },
                            }
                            pod["volumes"][volume] = new_volume_spec

    # Fix up configmaps
    if constants.configmaps_key in spec:
        if spec.is_kubernetes_deployment():
            spec_cfgmaps = spec[constants.configmaps_key]
            if "volumes" in pod:
                pod_volumes = pod[constants.volumes_key]
                for volume in pod_volumes.keys():
                    if volume in spec_cfgmaps:
                        volume_cfg = spec_cfgmaps[volume]
                        # Just make the dir (if necessary)
                        _create_bind_dir_if_relative(volume, volume_cfg, compose_dir)
        else:
            print(f"Warning: ConfigMaps not supported for {deployment_type}")

    # Fix up ports
    if "network" in spec and "ports" in spec["network"]:
        spec_ports = spec["network"]["ports"]
        for container_name, container_ports in spec_ports.items():
            if container_name in pod["services"]:
                pod["services"][container_name]["ports"] = container_ports
|
2023-06-27 22:58:41 +00:00
|
|
|
|
|
|
|
|
|
2023-11-08 08:11:00 +00:00
|
|
|
def _commands_plugin_paths(stack_name: str):
    """Return candidate deploy/commands.py paths for the stack's plugins."""
    return [
        plugin_path.joinpath("deploy", "commands.py")
        for plugin_path in get_plugin_code_paths(stack_name)
    ]
|
2023-10-09 20:54:55 +00:00
|
|
|
|
|
|
|
|
|
2023-10-10 21:32:07 +00:00
|
|
|
# See: https://stackoverflow.com/a/54625079/1701505
|
|
|
|
|
def _has_method(o, name):
|
|
|
|
|
return callable(getattr(o, name, None))
|
|
|
|
|
|
|
|
|
|
|
2023-07-30 04:38:46 +00:00
|
|
|
def call_stack_deploy_init(deploy_command_context):
    """Load each plugin's deploy/commands.py and invoke its init() hook.

    Only the first discovered init() runs (further ones are skipped with
    a warning). Returns that init()'s result, or None if no hook exists.
    """
    ret = None
    init_done = False
    for commands_path in _commands_plugin_paths(deploy_command_context.stack):
        if not commands_path.exists():
            continue
        module_spec = util.spec_from_file_location("commands", commands_path)
        if module_spec is None or module_spec.loader is None:
            continue
        commands_module = util.module_from_spec(module_spec)
        module_spec.loader.exec_module(commands_module)
        if not _has_method(commands_module, "init"):
            continue
        if init_done:
            # TODO: remove this restriction
            print(
                f"Skipping init() from plugin {commands_path}. "
                "Only one init() is allowed."
            )
        else:
            ret = commands_module.init(deploy_command_context)
            init_done = True
    return ret
|
2023-07-24 02:54:05 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
# TODO: fold this with function above
def call_stack_deploy_setup(
    deploy_command_context, parameters: LaconicStackSetupCommand, extra_args
):
    """Invoke the setup() hook from every plugin commands.py that defines one."""
    for commands_path in _commands_plugin_paths(deploy_command_context.stack):
        if not commands_path.exists():
            continue
        module_spec = util.spec_from_file_location("commands", commands_path)
        if module_spec is None or module_spec.loader is None:
            continue
        commands_module = util.module_from_spec(module_spec)
        module_spec.loader.exec_module(commands_module)
        if _has_method(commands_module, "setup"):
            commands_module.setup(deploy_command_context, parameters, extra_args)
|
2023-07-24 02:54:05 +00:00
|
|
|
|
|
|
|
|
|
2023-07-25 16:16:19 +00:00
|
|
|
# TODO: fold this with function above
def call_stack_deploy_create(deployment_context, extra_args):
    """Invoke the create() hook from every plugin commands.py that defines one."""
    for commands_path in _commands_plugin_paths(deployment_context.stack.name):
        if not commands_path.exists():
            continue
        module_spec = util.spec_from_file_location("commands", commands_path)
        if module_spec is None or module_spec.loader is None:
            continue
        commands_module = util.module_from_spec(module_spec)
        module_spec.loader.exec_module(commands_module)
        if _has_method(commands_module, "create"):
            commands_module.create(deployment_context, extra_args)
|
2023-07-25 16:16:19 +00:00
|
|
|
|
|
|
|
|
|
2026-03-11 03:56:21 +00:00
|
|
|
def call_stack_deploy_start(deployment_context):
    """Call start() hooks after k8s deployments and jobs are created.

    The start() hook receives the DeploymentContext, allowing stacks to
    create additional k8s resources (Services, etc.) in the deployment
    namespace, derivable as f"laconic-{deployment_context.id}".
    """
    for commands_path in _commands_plugin_paths(deployment_context.stack.name):
        if not commands_path.exists():
            continue
        module_spec = util.spec_from_file_location("commands", commands_path)
        if module_spec is None or module_spec.loader is None:
            continue
        commands_module = util.module_from_spec(module_spec)
        module_spec.loader.exec_module(commands_module)
        if _has_method(commands_module, "start"):
            commands_module.start(deployment_context)
|
|
|
|
|
|
|
|
|
|
|
2023-07-18 14:59:07 +00:00
|
|
|
# Inspect the pod yaml to find config files referenced in subdirectories
|
feat(k8s): auto-ConfigMap for file-level host-path compose volumes
File-level host-path compose volumes (e.g. `../config/foo.sh:/opt/foo.sh`)
were synthesized into a kind extraMount + hostPath PV chain with a
sanitized containerPath (`/mnt/host-path-<sanitized>`). The sanitized
name is derived from the compose volume source and is identical across
deployments of the same stack, so two deployments sharing a cluster
collided at the containerPath — kind only honors the first deployment's
bind, subsequent deployments' pods silently read the first's content.
The same code path was also broken on real k8s, which has no way to
populate `/mnt/host-path-*` on worker nodes.
File-level compose binds are conceptually k8s ConfigMaps. The snowball
stack already uses the ConfigMap-backed named-volume pattern by hand.
Make that automatic at the k8s object-generation layer, without
touching deployment-dir compose or spec files.
Behavior at deploy create (validation only, no file mutation):
- :rw on a host-path bind -> DeployerException (use a named
volume for writable data)
- Directory with subdirectories -> DeployerException (embed in image,
split into configmaps, or use
initContainer)
- Directory or file > ~700 KiB -> DeployerException (ConfigMap budget)
- File, or flat small directory -> accepted, handled at deploy start
Behavior at deploy start:
- cluster_info.get_configmaps() additionally walks pod + job compose
volumes and emits a V1ConfigMap per host-path bind (deduped by
sanitized name across all pods/services). Content read from
{deployment_dir}/config/<pod>/<file> (already populated by
_copy_extra_config_dirs).
- volumes_for_pod_files emits V1ConfigMapVolumeSource instead of
V1HostPathVolumeSource for host-path binds.
- volume_mounts_for_service stats the source and sets V1VolumeMount
sub_path to the filename when source is a regular file — single-key
ConfigMaps land as files, whole-dir ConfigMaps land as directories.
- _generate_kind_mounts no longer emits `/mnt/host-path-*` extraMounts
for these binds (the ConfigMap path bypasses kind node FS entirely).
Deployment dir layout is unchanged. Compose files, spec.yml, and
{deployment_dir}/config/<pod>/ remain exactly as today — trivially
diffable against stack source, no synthetic volume names. ConfigMaps
are visible only in k8s (kubectl get cm -n <ns>).
The existing `/mnt/host-path-*` skip in check_mounts_compatible is
retained as a transition tolerance for deployments created before
this change.
Updates:
- deployment_create: _validate_host_path_mounts() called per pod/job
in the create loops; 700 KiB ConfigMap budget (accounts for base64
+ metadata overhead)
- helpers: _generate_kind_mounts skips host-path entries;
volumes_for_pod_files emits ConfigMap-backed V1Volume;
volume_mounts_for_service takes optional deployment_dir and
auto-sets sub_path for single-file sources
- cluster_info: new _host_path_bind_configmaps() walked from
get_configmaps(); volume_mounts_for_service call passes
deployment_dir from spec.file_path
- docs: document the behavior and the rejected shapes in
deployment_patterns.md
- tests: k8s-deploy asserts the host-path ConfigMaps exist,
compose/spec unchanged, and no `/mnt/host-path-*` extraMounts
Refs: so-b86
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-20 13:13:43 +00:00
|
|
|
# Safety margin under the k8s ConfigMap 1 MiB hard limit. Accounts for
# base64 expansion (~33%) and ConfigMap metadata overhead.
# Shared budget for single-file and whole-directory host-path binds
# (enforced by _validate_host_path_mounts).
_HOST_PATH_CONFIGMAP_BUDGET_BYTES = 700 * 1024
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _validate_host_path_mounts(parsed_pod_file, pod_name, pod_file_path):
    """Fail fast at deploy create on unsupported host-path compose volumes.

    Host-path compose volumes (`<src>:<dst>[:opts]` with src starting
    with /, ., or ~) flow through auto-generated ConfigMaps at deploy
    start. ConfigMaps can't represent:
    - directories with subdirectories (flat key space)
    - content exceeding ~700 KiB (k8s 1 MiB limit minus base64/overhead)
    - writable mounts (ConfigMap mounts are read-only)

    Reject those shapes up front with a clear error so users don't hit
    the failure later at start time.

    Source resolution: compose paths like `../config/foo.sh` are
    relative to the compose file location in the stack source tree at
    deploy create time. At deploy start, the file is read from the
    matching copy under `{deployment_dir}/config/{pod}/` that deploy
    create lays down.
    """
    compose_stack_dir = Path(pod_file_path).resolve().parent
    services = parsed_pod_file.get("services") or {}
    for service_name, service_info in services.items():
        for volume_str in service_info.get("volumes") or []:
            parts = volume_str.split(":")
            # Short syntax without a container path (e.g. a bare named
            # volume) can't be a host-path bind; skip it.
            if len(parts) < 2:
                continue
            src = parts[0]
            if not is_host_path_mount(src):
                continue
            # Third field, when present, is a comma-separated option
            # list (e.g. "ro", "rw,z").
            mount_opts = parts[2] if len(parts) > 2 else None
            opt_tokens = (
                [t.strip() for t in mount_opts.split(",") if t.strip()]
                if mount_opts
                else []
            )
            if "rw" in opt_tokens:
                raise DeployerException(
                    f"Writable host-path bind not supported: "
                    f"'{volume_str}' in {pod_name}/{service_name}.\n"
                    "Host-path binds from the deployment directory are "
                    "static content injected as ConfigMaps (read-only). "
                    "Use a named volume with a spec-configured host path "
                    "under 'kind-mount-root' for writable data. See "
                    "docs/deployment_patterns.md."
                )

            abs_src = (compose_stack_dir / src).resolve()
            if not abs_src.exists():
                # Preserve existing behavior — compose-level binds with
                # missing sources fail later; don't introduce a new
                # early failure mode here.
                continue
            if abs_src.is_file():
                # Single files are always fine — one-key ConfigMap with
                # subPath. Budget check here too in case of huge single
                # files.
                size = abs_src.stat().st_size
                if size > _HOST_PATH_CONFIGMAP_BUDGET_BYTES:
                    raise DeployerException(
                        f"Host-path bind '{volume_str}' in "
                        f"{pod_name}/{service_name} points at a file of "
                        f"{size} bytes, exceeding the ConfigMap budget "
                        f"({_HOST_PATH_CONFIGMAP_BUDGET_BYTES} bytes "
                        f"after base64/overhead).\n\n"
                        "Embed the file in the container image at build "
                        "time, or split into multiple smaller files."
                    )
                continue
            if abs_src.is_dir():
                entries = list(abs_src.iterdir())
                if any(p.is_dir() for p in entries):
                    raise DeployerException(
                        f"Directory host-path bind '{volume_str}' in "
                        f"{pod_name}/{service_name} contains "
                        "subdirectories, which cannot be represented "
                        "in a k8s ConfigMap.\n\n"
                        "Restructure the stack to either:\n"
                        " - embed the directory in the container "
                        "image at build time,\n"
                        " - split into multiple ConfigMap entries "
                        "(one per subdir),\n"
                        " - or use an initContainer to populate the "
                        "content at runtime.\n\n"
                        "See docs/deployment_patterns.md."
                    )
                total = sum(
                    p.stat().st_size for p in entries if p.is_file()
                )
                if total > _HOST_PATH_CONFIGMAP_BUDGET_BYTES:
                    raise DeployerException(
                        f"Directory host-path bind '{volume_str}' in "
                        f"{pod_name}/{service_name} totals {total} "
                        f"bytes, exceeding the ConfigMap budget "
                        f"({_HOST_PATH_CONFIGMAP_BUDGET_BYTES} bytes "
                        f"after base64/overhead).\n\n"
                        "Embed the content in the container image at "
                        "build time, or split into smaller ConfigMaps. "
                        "See docs/deployment_patterns.md."
                    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# _find_extra_config_dirs: Find config dirs referenced in the pod files
|
2023-07-18 14:59:07 +00:00
|
|
|
# other than the one associated with the pod
|
|
|
|
|
def _find_extra_config_dirs(parsed_pod_file, pod):
|
|
|
|
|
config_dirs = set()
|
|
|
|
|
services = parsed_pod_file["services"]
|
|
|
|
|
for service in services:
|
|
|
|
|
service_info = services[service]
|
|
|
|
|
if "volumes" in service_info:
|
|
|
|
|
for volume in service_info["volumes"]:
|
|
|
|
|
if ":" in volume:
|
|
|
|
|
host_path = volume.split(":")[0]
|
|
|
|
|
if host_path.startswith("../config"):
|
|
|
|
|
config_dir = host_path.split("/")[2]
|
|
|
|
|
if config_dir != pod:
|
|
|
|
|
config_dirs.add(config_dir)
|
2024-07-09 15:37:35 +00:00
|
|
|
for env_file in service_info.get("env_file", []):
|
|
|
|
|
if env_file.startswith("../config"):
|
|
|
|
|
config_dir = env_file.split("/")[2]
|
|
|
|
|
if config_dir != pod:
|
|
|
|
|
config_dirs.add(config_dir)
|
2023-07-18 14:59:07 +00:00
|
|
|
return config_dirs
|
|
|
|
|
|
|
|
|
|
|
2023-09-04 18:14:05 +00:00
|
|
|
def _get_mapped_ports(stack: str, map_recipe: str):
    """Apply a host-port mapping recipe to every service port in the stack.

    Returns the (possibly rewritten) service -> port-list dict from
    _get_ports(). Exits the process when an unknown recipe is requested.
    """
    port_map_recipes = [
        "any-variable-random",
        "localhost-same",
        "any-same",
        "localhost-fixed-random",
        "any-fixed-random",
    ]
    ports = _get_ports(stack)
    # Implement any requested mapping recipe
    if ports and map_recipe:
        if map_recipe not in port_map_recipes:
            print(
                f"Error: --map-ports-to-host must specify one of: "
                f"{port_map_recipes}"
            )
            sys.exit(1)
        for service_ports in ports.values():
            for index, orig_port in enumerate(service_ports):
                # Strip /udp suffix if present
                bare_orig_port = orig_port.replace("/udp", "")
                # Beware: we're relying on luck to not collide
                random_port = random.randint(20000, 50000)
                if map_recipe == "any-variable-random":
                    # This is the default so take no action
                    pass
                elif map_recipe == "localhost-same":
                    # Replace instances of "- XX" with "- 127.0.0.1:XX"
                    service_ports[index] = f"127.0.0.1:{bare_orig_port}:{orig_port}"
                elif map_recipe == "any-same":
                    # Replace instances of "- XX" with "- 0.0.0.0:XX"
                    service_ports[index] = f"0.0.0.0:{bare_orig_port}:{orig_port}"
                elif map_recipe == "localhost-fixed-random":
                    # Replace instances of "- XX" with "- 127.0.0.1:<rnd>:XX"
                    service_ports[index] = f"127.0.0.1:{random_port}:{orig_port}"
                elif map_recipe == "any-fixed-random":
                    # Replace instances of "- XX" with "- 0.0.0.0:<rnd>:XX"
                    service_ports[index] = f"0.0.0.0:{random_port}:{orig_port}"
                else:
                    print("Error: bad map_recipe")
    return ports
|
|
|
|
|
|
|
|
|
|
|
2023-10-03 18:49:15 +00:00
|
|
|
def _parse_config_variables(variable_values: str):
|
|
|
|
|
result = None
|
|
|
|
|
if variable_values:
|
|
|
|
|
value_pairs = variable_values.split(",")
|
|
|
|
|
if len(value_pairs):
|
|
|
|
|
result_values = {}
|
|
|
|
|
for value_pair in value_pairs:
|
|
|
|
|
variable_value_pair = value_pair.split("=")
|
|
|
|
|
if len(variable_value_pair) != 2:
|
|
|
|
|
print(f"ERROR: config argument is not valid: {variable_values}")
|
|
|
|
|
sys.exit(1)
|
|
|
|
|
variable_name = variable_value_pair[0]
|
|
|
|
|
variable_value = variable_value_pair[1]
|
|
|
|
|
result_values[variable_name] = variable_value
|
2023-11-29 04:14:02 +00:00
|
|
|
result = result_values
|
2023-10-03 18:49:15 +00:00
|
|
|
return result
|
|
|
|
|
|
|
|
|
|
|
2023-06-27 22:58:41 +00:00
|
|
|
@click.command()
@click.option("--config", help="Provide config variables for the deployment")
@click.option(
    "--config-file", help="Provide config variables in a file for the deployment"
)
@click.option("--kube-config", help="Provide a config file for a k8s deployment")
@click.option(
    "--image-registry",
    help="Provide a container image registry url for this k8s cluster",
)
@click.option("--output", required=True, help="Write yaml spec file here")
@click.option(
    "--map-ports-to-host",
    required=False,
    help="Map ports to the host as one of: any-variable-random (default), "
    "localhost-same, any-same, localhost-fixed-random, any-fixed-random",
)
@click.pass_context
def init(
    ctx, config, config_file, kube_config, image_registry, output, map_ports_to_host
):
    """Generate a deployment spec file for the current stack.

    Thin click wrapper: gathers the stack and deployer type from the CLI
    context and delegates all real work to init_operation().
    """
    stack = global_options(ctx).stack
    deployer_type = ctx.obj.deployer.type
    deploy_command_context = ctx.obj
    return init_operation(
        deploy_command_context,
        stack,
        deployer_type,
        config,
        config_file,
        kube_config,
        image_registry,
        output,
        map_ports_to_host,
    )
|
2023-11-28 05:02:16 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
# The init command's implementation is in a separate function so that we can
# call it from other commands, bypassing the click decoration stuff
def init_operation(
    deploy_command_context,
    stack,
    deployer_type,
    config,
    config_file,
    kube_config,
    image_registry,
    output,
    map_ports_to_host,
):
    """Build a deployment spec for *stack* and write it as YAML to *output*.

    Merges (in precedence order, highest first): the stack's own init()
    defaults, --config name=value pairs, then --config-file variables.
    Validates k8s-only options (--kube-config, --image-registry) against
    the deployer type, computes host port mappings and named-volume
    descriptors, then dumps the resulting spec file.
    """
    default_spec_file_content = call_stack_deploy_init(deploy_command_context)
    spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type}
    if deployer_type == "k8s":
        # kube config is mandatory for k8s; image registry merely recommended
        if kube_config:
            spec_file_content.update({constants.kube_config_key: kube_config})
        else:
            error_exit("--kube-config must be supplied with --deploy-to k8s")
        if image_registry:
            spec_file_content.update({constants.image_registry_key: image_registry})
        else:
            print(
                "WARNING: --image-registry not specified, only default container "
                "registries (eg, Docker Hub) will be available"
            )
    else:
        # Check for --kube-config supplied for non-relevant deployer types
        if kube_config is not None:
            error_exit(
                f"--kube-config is not allowed with a {deployer_type} deployment"
            )
        if image_registry is not None:
            error_exit(
                f"--image-registry is not allowed with a {deployer_type} deployment"
            )
    if default_spec_file_content:
        spec_file_content.update(default_spec_file_content)
    config_variables = _parse_config_variables(config)
    # Implement merge, since update() overwrites
    if config_variables:
        orig_config = spec_file_content.get("config", {})
        new_config = config_variables
        # dict-merge: existing (orig) keys win over newly supplied ones
        merged_config = {**new_config, **orig_config}
        spec_file_content.update({"config": merged_config})
    if config_file:
        config_file_path = Path(config_file)
        if not config_file_path.exists():
            error_exit(f"config file: {config_file} does not exist")
        config_file_variables = env_var_map_from_file(config_file_path)
        if config_file_variables:
            orig_config = spec_file_content.get("config", {})
            new_config = config_file_variables
            # same precedence rule: already-present config keys win
            merged_config = {**new_config, **orig_config}
            spec_file_content.update({"config": merged_config})

    # Host port mappings per the requested --map-ports-to-host recipe
    ports = _get_mapped_ports(stack, map_ports_to_host)
    orig_network = spec_file_content.get("network", {})
    orig_network["ports"] = ports
    spec_file_content["network"] = orig_network

    # Named volumes: k8s volumes get no host path (provisioned in-cluster);
    # other deployers map them under ./data. Read-only "config" volumes on
    # k8s become configmaps instead.
    named_volumes = _get_named_volumes(stack)
    if named_volumes:
        volume_descriptors = {}
        configmap_descriptors = {}
        for named_volume in named_volumes["rw"]:
            if "k8s" in deployer_type:
                volume_descriptors[named_volume] = None
            else:
                volume_descriptors[named_volume] = f"./data/{named_volume}"
        for named_volume in named_volumes["ro"]:
            if "k8s" in deployer_type:
                if "config" in named_volume:
                    configmap_descriptors[named_volume] = f"./configmaps/{named_volume}"
                else:
                    volume_descriptors[named_volume] = None
            else:
                volume_descriptors[named_volume] = f"./data/{named_volume}"
        if volume_descriptors:
            # Merge with existing volumes from stack init()
            # init() volumes take precedence over compose defaults
            orig_volumes = spec_file_content.get("volumes", {})
            spec_file_content["volumes"] = {**volume_descriptors, **orig_volumes}
        if configmap_descriptors:
            spec_file_content["configmaps"] = configmap_descriptors
    if "k8s" in deployer_type:
        # Ensure the secrets section exists so later tooling can rely on it
        if "secrets" not in spec_file_content:
            spec_file_content["secrets"] = {}

    if opts.o.debug:
        print(
            f"Creating spec file for stack: {stack} with content: {spec_file_content}"
        )

    with open(output, "w") as output_file:
        get_yaml().dump(spec_file_content, output_file)
|
2023-06-27 22:58:41 +00:00
|
|
|
|
|
|
|
|
|
2026-02-03 05:55:14 +00:00
|
|
|
# Token pattern: $generate:hex:32$ or $generate:base64:16$
# Group 1 = generation type (e.g. "hex", "base64"), group 2 = byte length.
# Matched against spec.yml config values to identify secrets that should be
# freshly generated at deploy time rather than written to config.env.
GENERATE_TOKEN_PATTERN = re.compile(r"\$generate:(\w+):(\d+)\$")
|
|
|
|
|
|
|
|
|
|
|
2026-03-16 08:01:11 +00:00
|
|
|
def _generate_and_store_secrets(
    config_vars: dict, deployment_name: str, namespace: str = "default"
):
    """Generate secrets for $generate:...$ tokens and store in K8s Secret.

    Called by `deploy create` - generates fresh secrets and stores them.
    Returns the generated secrets dict for reference.

    :param config_vars: spec.yml config section; only string values containing
        a $generate:<type>:<len>$ token are processed
    :param deployment_name: used to name the K8s Secret
        ("<deployment_name>-generated-secrets")
    :param namespace: K8s namespace the Secret is created/replaced in
    """
    # Imported lazily so non-k8s code paths don't require the kubernetes pkg
    from kubernetes import client, config as k8s_config

    secrets = {}
    for name, value in config_vars.items():
        if not isinstance(value, str):
            continue
        match = GENERATE_TOKEN_PATTERN.search(value)
        if not match:
            continue

        secret_type, length = match.group(1), int(match.group(2))
        if secret_type == "hex":
            secrets[name] = token_hex(length)
        elif secret_type == "base64":
            secrets[name] = base64.b64encode(os.urandom(length)).decode()
        else:
            # Unknown generation type: fall back to hex rather than failing
            secrets[name] = token_hex(length)

    # Nothing to generate: skip all K8s interaction
    if not secrets:
        return secrets

    # Store in K8s Secret
    try:
        k8s_config.load_kube_config()
    except Exception:
        # Fall back to in-cluster config if available
        try:
            k8s_config.load_incluster_config()
        except Exception:
            # Best-effort: still return the generated values to the caller
            print(
                "Warning: Could not load kube config, secrets will not be stored in K8s"
            )
            return secrets

    v1 = client.CoreV1Api()
    secret_name = f"{deployment_name}-generated-secrets"

    # K8s Secret data values must be base64-encoded
    secret_data = {k: base64.b64encode(v.encode()).decode() for k, v in secrets.items()}
    k8s_secret = client.V1Secret(
        metadata=client.V1ObjectMeta(name=secret_name), data=secret_data, type="Opaque"
    )

    # Create, or replace on 409 so re-running deploy create regenerates values
    try:
        v1.create_namespaced_secret(namespace, k8s_secret)
        num_secrets = len(secrets)
        print(f"Created K8s Secret '{secret_name}' with {num_secrets} secret(s)")
    except client.exceptions.ApiException as e:
        if e.status == 409:  # Already exists
            v1.replace_namespaced_secret(secret_name, namespace, k8s_secret)
            num_secrets = len(secrets)
            print(f"Updated K8s Secret '{secret_name}' with {num_secrets} secret(s)")
        else:
            raise

    return secrets
|
|
|
|
|
|
|
|
|
|
|
2026-03-18 19:08:52 +00:00
|
|
|
def create_registry_secret(
    spec: Spec, deployment_name: str, namespace: str = "default"
) -> Optional[str]:
    """Create K8s docker-registry secret from spec + environment.

    Reads registry configuration from spec.yml and creates a Kubernetes
    secret of type kubernetes.io/dockerconfigjson for image pulls.

    Args:
        spec: The deployment spec containing image-registry config
        deployment_name: Name of the deployment (used for secret naming)
        namespace: Kubernetes namespace to create the secret in

    Returns:
        The secret name if created, None if no registry config
    """
    # Imported lazily so non-k8s code paths don't require the kubernetes pkg
    from kubernetes import client, config as k8s_config

    registry_config = spec.get_image_registry_config()
    if not registry_config:
        return None

    server = registry_config.get("server")
    username = registry_config.get("username")
    token_env = registry_config.get("token-env")
    token_file = registry_config.get("token-file")

    # Incomplete registry config: quietly skip secret creation
    if not server or not username:
        return None
    if not token_env and not token_file:
        return None

    # Resolve token: file takes precedence over env var
    token = None
    if token_file:
        token_path = os.path.expanduser(token_file)
        if os.path.exists(token_path):
            with open(token_path) as f:
                token = f.read().strip()
        else:
            print(f"Warning: Registry token file '{token_path}' not found")
    if not token and token_env:
        token = os.environ.get(token_env)

    if not token:
        source = token_file or token_env
        print(
            f"Warning: Registry token not available from '{source}', "
            "skipping registry secret"
        )
        return None

    # Create dockerconfigjson format (Docker API uses "password" field for tokens)
    auth = base64.b64encode(f"{username}:{token}".encode()).decode()
    docker_config = {
        "auths": {server: {"username": username, "password": token, "auth": auth}}
    }

    # Secret name derived from deployment name
    secret_name = f"{deployment_name}-image-pull-secret"

    # Load kube config
    try:
        k8s_config.load_kube_config()
    except Exception:
        try:
            k8s_config.load_incluster_config()
        except Exception:
            print("Warning: Could not load kube config, registry secret not created")
            return None

    v1 = client.CoreV1Api()

    k8s_secret = client.V1Secret(
        metadata=client.V1ObjectMeta(name=secret_name),
        data={
            ".dockerconfigjson": base64.b64encode(
                json.dumps(docker_config).encode()
            ).decode()
        },
        type="kubernetes.io/dockerconfigjson",
    )

    # Create, or replace on 409 so re-running deploy create refreshes the token
    try:
        v1.create_namespaced_secret(namespace, k8s_secret)
        print(f"Created registry secret '{secret_name}' for {server}")
    except client.exceptions.ApiException as e:
        if e.status == 409:  # Already exists
            v1.replace_namespaced_secret(secret_name, namespace, k8s_secret)
            print(f"Updated registry secret '{secret_name}' for {server}")
        else:
            raise

    return secret_name
|
|
|
|
|
|
|
|
|
|
|
2026-02-03 05:55:14 +00:00
|
|
|
def _write_config_file(
    spec_file: Path,
    config_env_file: Path,
    deployment_name: Optional[str] = None,
    namespace: str = "default",
):
    """Write spec.yml config: entries to config.env.

    The config: section in spec.yml should contain only deployment-specific
    overrides — values that differ between deployments (hostnames, endpoints,
    credentials, secrets via $generate:...$).

    Application defaults (ports, log levels, feature flags, tuning params)
    belong in the compose file's environment section. The compose file is
    the single source of truth for what env vars a service accepts and
    their default values. spec.yml overrides those defaults for a specific
    deployment.

    Args:
        spec_file: path of the spec.yml to read
        config_env_file: destination config.env path (overwritten)
        deployment_name: when set, $generate:...$ tokens are materialized
            into a K8s Secret via _generate_and_store_secrets()
        namespace: K8s namespace used for the generated-secrets Secret

    Exits the process if a listed credentials file is missing.
    """
    spec_content = get_parsed_deployment_spec(spec_file)
    # "or {}" guards against an explicit empty/null config: section
    config_vars = spec_content.get("config", {}) or {}

    # Generate and store secrets in K8s if deployment_name provided and tokens exist
    if deployment_name and config_vars:
        has_generate_tokens = any(
            isinstance(v, str) and GENERATE_TOKEN_PATTERN.search(v)
            for v in config_vars.values()
        )
        if has_generate_tokens:
            _generate_and_store_secrets(config_vars, deployment_name, namespace)

    # Write non-secret config to config.env (exclude $generate:...$ tokens)
    with open(config_env_file, "w") as output_file:
        output_file.write(
            "# AUTO-GENERATED by laconic-so from spec.yml config section.\n"
            "# Source: stack_orchestrator/deploy/deployment_create.py"
            " _write_config_file()\n"
            "# Do not edit — changes will be overwritten on deploy create"
            " or restart.\n"
            "# To change config, edit the config section in your spec.yml"
            " and redeploy.\n"
        )
        if config_vars:
            for variable_name, variable_value in config_vars.items():
                # Skip variables with generate tokens - they go to K8s Secret
                if isinstance(variable_value, str) and GENERATE_TOKEN_PATTERN.search(
                    variable_value
                ):
                    continue
                output_file.write(f"{variable_name}={variable_value}\n")

        # Append contents of credentials files listed in spec
        credentials_files = spec_content.get("credentials-files", []) or []
        for cred_path_str in credentials_files:
            cred_path = Path(cred_path_str).expanduser()
            if not cred_path.exists():
                print(f"Error: credentials file does not exist: {cred_path}")
                sys.exit(1)
            output_file.write(f"# From credentials file: {cred_path_str}\n")
            contents = cred_path.read_text()
            output_file.write(contents)
            # Keep the env file line-oriented even if the source lacked a
            # trailing newline
            if not contents.endswith("\n"):
                output_file.write("\n")
|
|
|
|
|
|
2023-10-03 18:49:15 +00:00
|
|
|
|
2023-11-20 16:12:57 +00:00
|
|
|
def _write_kube_config_file(external_path: Path, internal_path: Path):
|
|
|
|
|
if not external_path.exists():
|
|
|
|
|
error_exit(f"Kube config file {external_path} does not exist")
|
|
|
|
|
copyfile(external_path, internal_path)
|
|
|
|
|
|
|
|
|
|
|
2023-10-09 20:54:55 +00:00
|
|
|
def _copy_files_to_directory(file_paths: List[Path], directory: Path):
|
|
|
|
|
for path in file_paths:
|
|
|
|
|
# Using copy to preserve the execute bit
|
|
|
|
|
copy(path, os.path.join(directory, os.path.basename(path)))
|
|
|
|
|
|
|
|
|
|
|
2026-03-16 08:01:11 +00:00
|
|
|
def _get_existing_kind_cluster() -> Optional[str]:
|
|
|
|
|
"""Return the name of an existing Kind cluster, or None."""
|
|
|
|
|
try:
|
|
|
|
|
result = subprocess.run(
|
|
|
|
|
["kind", "get", "clusters"],
|
|
|
|
|
capture_output=True,
|
|
|
|
|
text=True,
|
|
|
|
|
timeout=10,
|
|
|
|
|
)
|
|
|
|
|
if result.returncode == 0:
|
|
|
|
|
clusters = [
|
|
|
|
|
c.strip() for c in result.stdout.strip().splitlines() if c.strip()
|
|
|
|
|
]
|
|
|
|
|
if clusters:
|
|
|
|
|
return clusters[0]
|
|
|
|
|
except (FileNotFoundError, subprocess.TimeoutExpired):
|
|
|
|
|
pass
|
|
|
|
|
return None
|
|
|
|
|
|
|
|
|
|
|
2026-02-01 00:05:27 +00:00
|
|
|
def _create_deployment_file(deployment_dir: Path, stack_source: Optional[Path] = None):
    """Write deployment.yml into *deployment_dir* with a cluster id.

    :param deployment_dir: deployment directory to write into
    :param stack_source: optional stack source path, recorded under
        "stack-source" so the deployment can later be restarted/rebuilt
    """
    deployment_file_path = deployment_dir.joinpath(constants.deployment_file_name)
    # Reuse existing Kind cluster if one exists, otherwise generate a timestamp-based ID
    existing = _get_existing_kind_cluster()
    cluster = existing if existing else generate_id("laconic")
    deployment_content = {constants.cluster_id_key: cluster}
    if stack_source:
        deployment_content["stack-source"] = str(stack_source)
    with open(deployment_file_path, "w") as output_file:
        get_yaml().dump(deployment_content, output_file)
|
2023-12-06 05:56:58 +00:00
|
|
|
|
|
|
|
|
|
2024-02-14 21:45:01 +00:00
|
|
|
def _check_volume_definitions(spec):
|
|
|
|
|
if spec.is_kubernetes_deployment():
|
|
|
|
|
for volume_name, volume_path in spec.get_volumes().items():
|
|
|
|
|
if volume_path:
|
|
|
|
|
if not os.path.isabs(volume_path):
|
2026-02-03 04:26:13 +00:00
|
|
|
# For k8s-kind: allow relative paths, they'll be resolved
|
|
|
|
|
# by _make_absolute_host_path() during kind config generation
|
|
|
|
|
if not spec.is_kind_deployment():
|
|
|
|
|
deploy_type = spec.get_deployment_type()
|
|
|
|
|
raise Exception(
|
|
|
|
|
f"Relative path {volume_path} for volume "
|
|
|
|
|
f"{volume_name} not supported for {deploy_type}"
|
|
|
|
|
)
|
2024-02-14 21:45:01 +00:00
|
|
|
|
|
|
|
|
|
2023-06-27 22:58:41 +00:00
|
|
|
@click.command()
@click.option(
    "--spec-file", required=True, help="Spec file to use to create this deployment"
)
@click.option("--deployment-dir", help="Create deployment files in this directory")
@click.option(
    "--update",
    is_flag=True,
    default=False,
    help="Update existing deployment directory, preserving data volumes and env file",
)
@click.option(
    "--helm-chart",
    is_flag=True,
    default=False,
    help="Generate Helm chart instead of deploying (k8s only)",
)
# TODO: Hack
@click.option("--network-dir", help="Network configuration supplied in this directory")
@click.option("--initial-peers", help="Initial set of persistent peers")
@click.argument("extra_args", nargs=-1, type=click.UNPROCESSED)
@click.pass_context
def create(
    ctx,
    spec_file,
    deployment_dir,
    update,
    helm_chart,
    network_dir,
    initial_peers,
    extra_args,
):
    """Create a deployment directory from a spec file (CLI entry point).

    Thin click wrapper that delegates to create_operation().
    """
    deployment_command_context = ctx.obj
    return create_operation(
        deployment_command_context,
        spec_file,
        deployment_dir,
        update,
        helm_chart,
        network_dir,
        initial_peers,
        extra_args,
    )
|
2023-11-28 05:02:16 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
# The create command's implementation is in a separate function so that we can
# call it from other commands, bypassing the click decoration stuff
def create_operation(
    deployment_command_context,
    spec_file,
    deployment_dir,
    update=False,
    helm_chart=False,
    network_dir=None,
    initial_peers=None,
    extra_args=(),
):
    """Materialize a deployment directory from *spec_file*.

    Normal mode writes all deployment files into a fresh directory;
    --update rewrites an existing deployment in place (via a temp dir,
    backing up changed files and leaving data volumes untouched);
    --helm-chart (k8s only) generates a Helm chart instead and returns
    early. Finally delegates to the stack's own create hook.
    """
    parsed_spec = Spec(
        os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file)
    )
    _check_volume_definitions(parsed_spec)
    stack_name = parsed_spec["stack"]
    deployment_type = parsed_spec[constants.deploy_to_key]

    if opts.o.debug:
        print(f"parsed spec: {parsed_spec}")

    if deployment_dir is None:
        deployment_dir_path = _make_default_deployment_dir()
    else:
        deployment_dir_path = Path(deployment_dir)

    # --update requires an existing dir; plain create requires a fresh one
    if deployment_dir_path.exists():
        if not update:
            error_exit(f"{deployment_dir_path} already exists")
        if opts.o.debug:
            print(f"Updating existing deployment at {deployment_dir_path}")
    else:
        if update:
            error_exit(f"--update requires that {deployment_dir_path} already exists")
        os.mkdir(deployment_dir_path)

    # Branch to Helm chart generation flow if --helm-chart flag is set
    if deployment_type == "k8s" and helm_chart:
        # Imported lazily: only needed on this branch
        from stack_orchestrator.deploy.k8s.helm.chart_generator import (
            generate_helm_chart,
        )

        generate_helm_chart(stack_name, spec_file, deployment_dir_path)
        return  # Exit early for helm chart generation

    # Resolve stack source path for restart capability
    stack_source = get_stack_path(stack_name)

    if update:
        # Sync mode: write to temp dir, then copy to deployment dir with backups
        temp_dir = Path(tempfile.mkdtemp(prefix="deployment-sync-"))
        try:
            # Write deployment files to temp dir
            # (skip deployment.yml to preserve cluster ID)
            _write_deployment_files(
                temp_dir,
                Path(spec_file),
                parsed_spec,
                stack_name,
                deployment_type,
                include_deployment_file=False,
                stack_source=stack_source,
            )

            # Copy from temp to deployment dir, excluding data volumes
            # and backing up changed files.
            # Exclude data/* to avoid touching user data volumes.
            exclude_patterns = ["data", "data/*"]
            _safe_copy_tree(
                temp_dir, deployment_dir_path, exclude_patterns=exclude_patterns
            )
        finally:
            # Clean up temp dir
            rmtree(temp_dir)
    else:
        # Normal mode: write directly to deployment dir
        _write_deployment_files(
            deployment_dir_path,
            Path(spec_file),
            parsed_spec,
            stack_name,
            deployment_type,
            include_deployment_file=True,
            stack_source=stack_source,
        )

    # Delegate to the stack's Python code
    # The deploy create command doesn't require a --stack argument so we need
    # to insert the stack member here.
    deployment_command_context.stack = stack_name
    deployment_context = DeploymentContext()
    deployment_context.init(deployment_dir_path)
    # Call the deployer to generate any deployer-specific files (e.g. for kind)
    deployer_config_generator = getDeployerConfigGenerator(
        deployment_type, deployment_context
    )
    # TODO: make deployment_dir_path a Path above
    if deployer_config_generator is not None:
        deployer_config_generator.generate(deployment_dir_path)
    call_stack_deploy_create(
        deployment_context, [network_dir, initial_peers, *extra_args]
    )
|
|
|
|
|
|
|
|
|
|
|
2026-02-01 00:05:27 +00:00
|
|
|
def _safe_copy_tree(src: Path, dst: Path, exclude_patterns: Optional[List[str]] = None):
|
2025-10-17 15:21:23 +00:00
|
|
|
"""
|
|
|
|
|
Recursively copy a directory tree, backing up changed files with .bak suffix.
|
|
|
|
|
|
|
|
|
|
:param src: Source directory
|
|
|
|
|
:param dst: Destination directory
|
|
|
|
|
:param exclude_patterns: List of path patterns to exclude (relative to src)
|
|
|
|
|
"""
|
|
|
|
|
if exclude_patterns is None:
|
|
|
|
|
exclude_patterns = []
|
|
|
|
|
|
|
|
|
|
def should_exclude(path: Path) -> bool:
|
|
|
|
|
"""Check if path matches any exclude pattern."""
|
|
|
|
|
rel_path = path.relative_to(src)
|
|
|
|
|
for pattern in exclude_patterns:
|
|
|
|
|
if rel_path.match(pattern):
|
|
|
|
|
return True
|
|
|
|
|
return False
|
|
|
|
|
|
|
|
|
|
def safe_copy_file(src_file: Path, dst_file: Path):
|
|
|
|
|
"""Copy file, backing up destination if it differs."""
|
|
|
|
|
if (
|
|
|
|
|
dst_file.exists()
|
|
|
|
|
and not dst_file.is_dir()
|
|
|
|
|
and not filecmp.cmp(src_file, dst_file)
|
|
|
|
|
):
|
|
|
|
|
os.rename(dst_file, f"{dst_file}.bak")
|
|
|
|
|
copy(src_file, dst_file)
|
|
|
|
|
|
|
|
|
|
# Walk the source tree
|
|
|
|
|
for src_path in src.rglob("*"):
|
|
|
|
|
if should_exclude(src_path):
|
|
|
|
|
continue
|
|
|
|
|
|
|
|
|
|
rel_path = src_path.relative_to(src)
|
|
|
|
|
dst_path = dst / rel_path
|
|
|
|
|
|
|
|
|
|
if src_path.is_dir():
|
|
|
|
|
dst_path.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
else:
|
|
|
|
|
dst_path.parent.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
safe_copy_file(src_path, dst_path)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _write_deployment_files(
|
|
|
|
|
target_dir: Path,
|
|
|
|
|
spec_file: Path,
|
|
|
|
|
parsed_spec: Spec,
|
|
|
|
|
stack_name: str,
|
|
|
|
|
deployment_type: str,
|
|
|
|
|
include_deployment_file: bool = True,
|
2026-02-01 00:05:27 +00:00
|
|
|
stack_source: Optional[Path] = None,
|
2025-10-17 15:21:23 +00:00
|
|
|
):
|
|
|
|
|
"""
|
|
|
|
|
Write deployment files to target directory.
|
|
|
|
|
|
|
|
|
|
:param target_dir: Directory to write files to
|
|
|
|
|
:param spec_file: Path to spec file
|
|
|
|
|
:param parsed_spec: Parsed spec object
|
|
|
|
|
:param stack_name: Name of stack
|
|
|
|
|
:param deployment_type: Type of deployment
|
2026-02-01 00:05:27 +00:00
|
|
|
:param include_deployment_file: Whether to create deployment.yml (skip for update)
|
|
|
|
|
:param stack_source: Path to stack source (git repo) for restart capability
|
2025-10-17 15:21:23 +00:00
|
|
|
"""
|
|
|
|
|
stack_file = get_stack_path(stack_name).joinpath(constants.stack_file_name)
|
|
|
|
|
parsed_stack = get_parsed_stack_config(stack_name)
|
|
|
|
|
|
|
|
|
|
# Copy spec file and the stack file into the target dir
|
|
|
|
|
copyfile(spec_file, target_dir.joinpath(constants.spec_file_name))
|
|
|
|
|
copyfile(stack_file, target_dir.joinpath(constants.stack_file_name))
|
|
|
|
|
|
|
|
|
|
# Create deployment file if requested
|
|
|
|
|
if include_deployment_file:
|
2026-02-01 00:05:27 +00:00
|
|
|
_create_deployment_file(target_dir, stack_source=stack_source)
|
2025-10-17 15:21:23 +00:00
|
|
|
|
|
|
|
|
# Copy any config variables from the spec file into an env file suitable for compose
|
2026-02-03 05:55:14 +00:00
|
|
|
# Use stack_name as deployment_name for K8s secret naming
|
2026-02-03 06:40:37 +00:00
|
|
|
# Extract just the name part if stack_name is a path ("path/to/stack" -> "stack")
|
|
|
|
|
deployment_name = Path(stack_name).name.replace("_", "-")
|
2026-03-16 08:01:11 +00:00
|
|
|
# Derive namespace from spec or stack name, matching deploy_k8s logic
|
|
|
|
|
namespace = parsed_spec.get_namespace() or f"laconic-{deployment_name}"
|
2026-02-03 05:55:14 +00:00
|
|
|
_write_config_file(
|
2026-03-16 08:01:11 +00:00
|
|
|
spec_file,
|
|
|
|
|
target_dir.joinpath(constants.config_file_name),
|
|
|
|
|
deployment_name,
|
|
|
|
|
namespace=namespace,
|
2026-02-03 05:55:14 +00:00
|
|
|
)
|
2025-10-17 15:21:23 +00:00
|
|
|
|
|
|
|
|
# Copy any k8s config file into the target dir
|
2023-11-20 16:12:57 +00:00
|
|
|
if deployment_type == "k8s":
|
2026-01-22 01:58:31 +00:00
|
|
|
_write_kube_config_file(
|
|
|
|
|
Path(parsed_spec[constants.kube_config_key]),
|
2025-10-17 15:21:23 +00:00
|
|
|
target_dir.joinpath(constants.kube_config_filename),
|
2026-01-22 01:58:31 +00:00
|
|
|
)
|
2025-10-17 15:21:23 +00:00
|
|
|
|
|
|
|
|
# Copy the pod files into the target dir, fixing up content
|
2023-10-09 20:54:55 +00:00
|
|
|
pods = get_pod_list(parsed_stack)
|
2025-10-17 15:21:23 +00:00
|
|
|
destination_compose_dir = target_dir.joinpath("compose")
|
|
|
|
|
os.makedirs(destination_compose_dir, exist_ok=True)
|
|
|
|
|
destination_pods_dir = target_dir.joinpath("pods")
|
|
|
|
|
os.makedirs(destination_pods_dir, exist_ok=True)
|
2023-07-24 02:54:05 +00:00
|
|
|
yaml = get_yaml()
|
2025-10-17 15:21:23 +00:00
|
|
|
|
2023-06-27 22:58:41 +00:00
|
|
|
for pod in pods:
|
2024-04-18 21:22:47 +00:00
|
|
|
pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
|
2026-01-22 06:10:36 +00:00
|
|
|
if pod_file_path is None:
|
|
|
|
|
continue
|
2023-06-27 22:58:41 +00:00
|
|
|
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
|
feat(k8s): auto-ConfigMap for file-level host-path compose volumes
File-level host-path compose volumes (e.g. `../config/foo.sh:/opt/foo.sh`)
were synthesized into a kind extraMount + hostPath PV chain with a
sanitized containerPath (`/mnt/host-path-<sanitized>`). The sanitized
name is derived from the compose volume source and is identical across
deployments of the same stack, so two deployments sharing a cluster
collided at the containerPath — kind only honors the first deployment's
bind, subsequent deployments' pods silently read the first's content.
The same code path was also broken on real k8s, which has no way to
populate `/mnt/host-path-*` on worker nodes.
File-level compose binds are conceptually k8s ConfigMaps. The snowball
stack already uses the ConfigMap-backed named-volume pattern by hand.
Make that automatic at the k8s object-generation layer, without
touching deployment-dir compose or spec files.
Behavior at deploy create (validation only, no file mutation):
- :rw on a host-path bind -> DeployerException (use a named
volume for writable data)
- Directory with subdirectories -> DeployerException (embed in image,
split into configmaps, or use
initContainer)
- Directory or file > ~700 KiB -> DeployerException (ConfigMap budget)
- File, or flat small directory -> accepted, handled at deploy start
Behavior at deploy start:
- cluster_info.get_configmaps() additionally walks pod + job compose
volumes and emits a V1ConfigMap per host-path bind (deduped by
sanitized name across all pods/services). Content read from
{deployment_dir}/config/<pod>/<file> (already populated by
_copy_extra_config_dirs).
- volumes_for_pod_files emits V1ConfigMapVolumeSource instead of
V1HostPathVolumeSource for host-path binds.
- volume_mounts_for_service stats the source and sets V1VolumeMount
sub_path to the filename when source is a regular file — single-key
ConfigMaps land as files, whole-dir ConfigMaps land as directories.
- _generate_kind_mounts no longer emits `/mnt/host-path-*` extraMounts
for these binds (the ConfigMap path bypasses kind node FS entirely).
Deployment dir layout is unchanged. Compose files, spec.yml, and
{deployment_dir}/config/<pod>/ remain exactly as today — trivially
diffable against stack source, no synthetic volume names. ConfigMaps
are visible only in k8s (kubectl get cm -n <ns>).
The existing `/mnt/host-path-*` skip in check_mounts_compatible is
retained as a transition tolerance for deployments created before
this change.
Updates:
- deployment_create: _validate_host_path_mounts() called per pod/job
in the create loops; 700 KiB ConfigMap budget (accounts for base64
+ metadata overhead)
- helpers: _generate_kind_mounts skips host-path entries;
volumes_for_pod_files emits ConfigMap-backed V1Volume;
volume_mounts_for_service takes optional deployment_dir and
auto-sets sub_path for single-file sources
- cluster_info: new _host_path_bind_configmaps() walked from
get_configmaps(); volume_mounts_for_service call passes
deployment_dir from spec.file_path
- docs: document the behavior and the rejected shapes in
deployment_patterns.md
- tests: k8s-deploy asserts the host-path ConfigMaps exist,
compose/spec unchanged, and no `/mnt/host-path-*` extraMounts
Refs: so-b86
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-20 13:13:43 +00:00
|
|
|
# Reject host-path compose volumes whose shape can't land as a
|
|
|
|
|
# ConfigMap (dir-with-subdirs, oversize, writable). File-level
|
|
|
|
|
# and flat-dir host-path binds are accepted — they auto-convert
|
|
|
|
|
# to ConfigMaps at deploy start via cluster_info.get_configmaps.
|
|
|
|
|
if parsed_spec.is_kubernetes_deployment():
|
|
|
|
|
_validate_host_path_mounts(parsed_pod_file, pod, pod_file_path)
|
2023-07-18 14:59:07 +00:00
|
|
|
extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
|
2023-11-20 16:12:57 +00:00
|
|
|
destination_pod_dir = destination_pods_dir.joinpath(pod)
|
2025-10-17 15:21:23 +00:00
|
|
|
os.makedirs(destination_pod_dir, exist_ok=True)
|
2023-11-28 05:02:16 +00:00
|
|
|
if opts.o.debug:
|
2023-07-18 14:59:07 +00:00
|
|
|
print(f"extra config dirs: {extra_config_dirs}")
|
2023-06-28 03:18:04 +00:00
|
|
|
_fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
|
2026-01-22 01:58:31 +00:00
|
|
|
with open(
|
|
|
|
|
destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w"
|
|
|
|
|
) as output_file:
|
2023-06-27 22:58:41 +00:00
|
|
|
yaml.dump(parsed_pod_file, output_file)
|
2025-10-17 15:21:23 +00:00
|
|
|
|
2023-06-27 22:58:41 +00:00
|
|
|
# Copy the config files for the pod, if any
|
2023-07-18 14:59:07 +00:00
|
|
|
config_dirs = {pod}
|
|
|
|
|
config_dirs = config_dirs.union(extra_config_dirs)
|
|
|
|
|
for config_dir in config_dirs:
|
2024-04-23 21:47:20 +00:00
|
|
|
source_config_dir = resolve_config_dir(stack_name, config_dir)
|
2023-07-18 14:59:07 +00:00
|
|
|
if os.path.exists(source_config_dir):
|
2025-10-17 15:21:23 +00:00
|
|
|
destination_config_dir = target_dir.joinpath("config", config_dir)
|
|
|
|
|
copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
|
|
|
|
|
|
2023-10-09 20:54:55 +00:00
|
|
|
# Copy the script files for the pod, if any
|
|
|
|
|
if pod_has_scripts(parsed_stack, pod):
|
2023-11-20 16:12:57 +00:00
|
|
|
destination_script_dir = destination_pod_dir.joinpath("scripts")
|
2025-10-17 15:21:23 +00:00
|
|
|
os.makedirs(destination_script_dir, exist_ok=True)
|
2023-10-09 20:54:55 +00:00
|
|
|
script_paths = get_pod_script_paths(parsed_stack, pod)
|
|
|
|
|
_copy_files_to_directory(script_paths, destination_script_dir)
|
2025-10-17 15:21:23 +00:00
|
|
|
|
2026-03-11 03:56:21 +00:00
|
|
|
if not parsed_spec.is_kubernetes_deployment():
|
2025-10-17 15:21:23 +00:00
|
|
|
# TODO:
|
2026-02-01 00:05:27 +00:00
|
|
|
# This is odd - looks up config dir that matches a volume name,
|
|
|
|
|
# then copies as a mount dir?
|
|
|
|
|
# AFAICT not used by or relevant to any existing stack - roy
|
2025-10-17 15:21:23 +00:00
|
|
|
|
2024-08-10 02:32:21 +00:00
|
|
|
# TODO: We should probably only do this if the volume is marked :ro.
|
|
|
|
|
for volume_name, volume_path in parsed_spec.get_volumes().items():
|
|
|
|
|
source_config_dir = resolve_config_dir(stack_name, volume_name)
|
|
|
|
|
# Only copy if the source exists and is _not_ empty.
|
|
|
|
|
if os.path.exists(source_config_dir) and os.listdir(source_config_dir):
|
2025-10-17 15:21:23 +00:00
|
|
|
destination_config_dir = target_dir.joinpath(volume_path)
|
2024-08-10 02:32:21 +00:00
|
|
|
# Only copy if the destination exists and _is_ empty.
|
2026-01-22 01:58:31 +00:00
|
|
|
if os.path.exists(destination_config_dir) and not os.listdir(
|
|
|
|
|
destination_config_dir
|
|
|
|
|
):
|
|
|
|
|
copytree(
|
|
|
|
|
source_config_dir,
|
|
|
|
|
destination_config_dir,
|
|
|
|
|
dirs_exist_ok=True,
|
|
|
|
|
)
|
2024-08-09 02:32:06 +00:00
|
|
|
|
2026-03-11 03:56:21 +00:00
|
|
|
# Copy configmap directories for k8s deployments (outside the pod loop
|
|
|
|
|
# so this works for jobs-only stacks too)
|
|
|
|
|
if parsed_spec.is_kubernetes_deployment():
|
2026-04-14 06:00:27 +00:00
|
|
|
configmaps = parsed_spec.get_configmaps()
|
|
|
|
|
for configmap_name, configmap_path in configmaps.items():
|
|
|
|
|
# Spec values starting with ./ are deployment-dir destination
|
|
|
|
|
# paths (written by deploy init for auto-discovered configmaps).
|
|
|
|
|
# Other values are source paths relative to the stack root
|
|
|
|
|
# (user-defined in spec.yml). Fall back to the config/ dir
|
|
|
|
|
# convention if no value is provided.
|
|
|
|
|
if configmap_path and not str(configmap_path).startswith("./"):
|
2026-04-14 12:03:47 +00:00
|
|
|
# User-defined source path. Can be:
|
|
|
|
|
# - repo-relative: "stack-orchestrator/compose/maintenance"
|
|
|
|
|
# - home-relative: "~/.credentials/local-certs/s3"
|
|
|
|
|
# - absolute: "/path/to/dir"
|
|
|
|
|
source_config_dir = Path(os.path.expanduser(configmap_path))
|
2026-04-14 06:00:27 +00:00
|
|
|
else:
|
|
|
|
|
source_config_dir = resolve_config_dir(stack_name, configmap_name)
|
2026-03-11 03:56:21 +00:00
|
|
|
if os.path.exists(source_config_dir):
|
2026-04-14 06:00:27 +00:00
|
|
|
destination_config_dir = target_dir.joinpath("configmaps", configmap_name)
|
2026-03-18 21:55:28 +00:00
|
|
|
copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
|
2026-03-11 03:56:21 +00:00
|
|
|
|
|
|
|
|
# Copy the job files into the target dir
|
2025-12-04 06:13:28 +00:00
|
|
|
jobs = get_job_list(parsed_stack)
|
2026-03-11 03:56:21 +00:00
|
|
|
if jobs:
|
2025-10-17 15:21:23 +00:00
|
|
|
destination_compose_jobs_dir = target_dir.joinpath("compose-jobs")
|
|
|
|
|
os.makedirs(destination_compose_jobs_dir, exist_ok=True)
|
2025-12-04 06:13:28 +00:00
|
|
|
for job in jobs:
|
|
|
|
|
job_file_path = get_job_file_path(stack_name, parsed_stack, job)
|
|
|
|
|
if job_file_path and job_file_path.exists():
|
|
|
|
|
parsed_job_file = yaml.load(open(job_file_path, "r"))
|
feat(k8s): auto-ConfigMap for file-level host-path compose volumes
File-level host-path compose volumes (e.g. `../config/foo.sh:/opt/foo.sh`)
were synthesized into a kind extraMount + hostPath PV chain with a
sanitized containerPath (`/mnt/host-path-<sanitized>`). The sanitized
name is derived from the compose volume source and is identical across
deployments of the same stack, so two deployments sharing a cluster
collided at the containerPath — kind only honors the first deployment's
bind, subsequent deployments' pods silently read the first's content.
The same code path was also broken on real k8s, which has no way to
populate `/mnt/host-path-*` on worker nodes.
File-level compose binds are conceptually k8s ConfigMaps. The snowball
stack already uses the ConfigMap-backed named-volume pattern by hand.
Make that automatic at the k8s object-generation layer, without
touching deployment-dir compose or spec files.
Behavior at deploy create (validation only, no file mutation):
- :rw on a host-path bind -> DeployerException (use a named
volume for writable data)
- Directory with subdirectories -> DeployerException (embed in image,
split into configmaps, or use
initContainer)
- Directory or file > ~700 KiB -> DeployerException (ConfigMap budget)
- File, or flat small directory -> accepted, handled at deploy start
Behavior at deploy start:
- cluster_info.get_configmaps() additionally walks pod + job compose
volumes and emits a V1ConfigMap per host-path bind (deduped by
sanitized name across all pods/services). Content read from
{deployment_dir}/config/<pod>/<file> (already populated by
_copy_extra_config_dirs).
- volumes_for_pod_files emits V1ConfigMapVolumeSource instead of
V1HostPathVolumeSource for host-path binds.
- volume_mounts_for_service stats the source and sets V1VolumeMount
sub_path to the filename when source is a regular file — single-key
ConfigMaps land as files, whole-dir ConfigMaps land as directories.
- _generate_kind_mounts no longer emits `/mnt/host-path-*` extraMounts
for these binds (the ConfigMap path bypasses kind node FS entirely).
Deployment dir layout is unchanged. Compose files, spec.yml, and
{deployment_dir}/config/<pod>/ remain exactly as today — trivially
diffable against stack source, no synthetic volume names. ConfigMaps
are visible only in k8s (kubectl get cm -n <ns>).
The existing `/mnt/host-path-*` skip in check_mounts_compatible is
retained as a transition tolerance for deployments created before
this change.
Updates:
- deployment_create: _validate_host_path_mounts() called per pod/job
in the create loops; 700 KiB ConfigMap budget (accounts for base64
+ metadata overhead)
- helpers: _generate_kind_mounts skips host-path entries;
volumes_for_pod_files emits ConfigMap-backed V1Volume;
volume_mounts_for_service takes optional deployment_dir and
auto-sets sub_path for single-file sources
- cluster_info: new _host_path_bind_configmaps() walked from
get_configmaps(); volume_mounts_for_service call passes
deployment_dir from spec.file_path
- docs: document the behavior and the rejected shapes in
deployment_patterns.md
- tests: k8s-deploy asserts the host-path ConfigMaps exist,
compose/spec unchanged, and no `/mnt/host-path-*` extraMounts
Refs: so-b86
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-20 13:13:43 +00:00
|
|
|
if parsed_spec.is_kubernetes_deployment():
|
|
|
|
|
_validate_host_path_mounts(
|
|
|
|
|
parsed_job_file, job, job_file_path
|
|
|
|
|
)
|
2025-12-04 06:13:28 +00:00
|
|
|
_fixup_pod_file(parsed_job_file, parsed_spec, destination_compose_dir)
|
2026-01-22 01:58:31 +00:00
|
|
|
with open(
|
|
|
|
|
destination_compose_jobs_dir.joinpath(
|
|
|
|
|
"docker-compose-%s.yml" % job
|
|
|
|
|
),
|
|
|
|
|
"w",
|
|
|
|
|
) as output_file:
|
2025-12-04 06:13:28 +00:00
|
|
|
yaml.dump(parsed_job_file, output_file)
|
|
|
|
|
if opts.o.debug:
|
|
|
|
|
print(f"Copied job compose file: {job}")
|
|
|
|
|
|
# TODO: this code should be in the stack .py files but
# we haven't yet figured out how to integrate click across
# the plugin boundary
@click.command()
@click.option("--node-moniker", help="Moniker for this node")
@click.option("--chain-id", help="The new chain id")
@click.option("--key-name", help="Name for new node key")
@click.option(
    "--gentx-files", help="List of comma-delimited gentx filenames from other nodes"
)
@click.option(
    "--gentx-addresses",
    type=str,
    help="List of comma-delimited validator addresses for other nodes",
)
@click.option("--genesis-file", help="Genesis file for the network")
@click.option(
    "--initialize-network", is_flag=True, default=False, help="Initialize phase"
)
@click.option("--join-network", is_flag=True, default=False, help="Join phase")
@click.option("--connect-network", is_flag=True, default=False, help="Connect phase")
@click.option("--create-network", is_flag=True, default=False, help="Create phase")
@click.option("--network-dir", help="Directory for network files")
@click.argument("extra_args", nargs=-1)
@click.pass_context
def setup(
    ctx,
    node_moniker,
    chain_id,
    key_name,
    gentx_files,
    gentx_addresses,
    genesis_file,
    initialize_network,
    join_network,
    connect_network,
    create_network,
    network_dir,
    extra_args,
):
    """Marshal the CLI options into a setup command and delegate to the stack plugin."""
    # Argument order here must match the LaconicStackSetupCommand constructor.
    command_fields = (
        chain_id,
        node_moniker,
        key_name,
        initialize_network,
        join_network,
        connect_network,
        create_network,
        gentx_files,
        gentx_addresses,
        genesis_file,
        network_dir,
    )
    setup_command = LaconicStackSetupCommand(*command_fields)
    call_stack_deploy_setup(ctx.obj, setup_command, extra_args)