feat(k8s): decouple deployment-id from cluster-id
cluster-id plays two roles today: (a) which kind cluster this deployment attaches to (used for the kube-config context name) and (b) compose_project_name -> app_name, the prefix for every k8s resource the deployment creates. _get_existing_kind_cluster() in deploy create forces (a) to inherit the running cluster's name, and because (a) and (b) are the same field, (b) inherits too — so two deployments that share a cluster also share an app_name and collide on every resource whose suffix isn't naturally distinct (PVs are cluster-scoped; same-stack deployments collide there in particular). Decouple: add a distinct `deployment-id` field. cluster-id keeps its current behavior (inherit running cluster, else fresh). deployment-id is always fresh per `deploy create`. K8sDeployer sources kind_cluster_name from cluster-id and app_name from deployment-id. Backward compatibility: - Existing deployment.yml files have only cluster-id; no on-disk change until the next `deploy create`. - DeploymentContext.init() falls back: deployment-id = cluster-id when the field is absent. Existing deployments keep their current app_name and resource names on next start — no PV renames, no re-binds, no data orphaning. - `compose_project_name` parameter to K8sDeployer is retained (still used by the compose deployer path); only the k8s-side internals switch to deployment_context getters. - The helm chart generator continues to derive chart names from cluster-id; untouched here, worth a follow-up for consistency. Effect on woodburn: dumpster/rpc/trashscan each already carry a distinct cluster-id in their deployment.yml (pre-`_get_existing_kind_cluster` era). Under the fallback, they all adopt their existing cluster-id as deployment-id, so resource names are identical to today. Effect on new deployments: even when they share a running cluster (kind-cluster-name in kube-config matches cluster-id), they get distinct deployment-ids at deploy create, and thus distinct resource name prefixes. 
The same-stack PV collision surfaced by the namespace ownership check goes away by construction. Test: run-deploy-test.sh now reads deployment-id from the new field, falling back to cluster-id for pre-decouple fixtures. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>

pull/748/head
parent
b9148c8051
commit
ac4a509d6f
|
|
@ -23,6 +23,7 @@ compose_deploy_type = "compose"
|
|||
k8s_kind_deploy_type = "k8s-kind"
|
||||
k8s_deploy_type = "k8s"
|
||||
cluster_id_key = "cluster-id"
|
||||
deployment_id_key = "deployment-id"
|
||||
kube_config_key = "kube-config"
|
||||
deploy_to_key = "deploy-to"
|
||||
network_key = "network"
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ from stack_orchestrator.deploy.spec import Spec
|
|||
class DeploymentContext:
|
||||
deployment_dir: Path
|
||||
id: str
|
||||
deployment_id: str
|
||||
spec: Spec
|
||||
stack: Stack
|
||||
|
||||
|
|
@ -48,8 +49,27 @@ class DeploymentContext:
|
|||
return self.get_compose_dir() / f"docker-compose-{name}.yml"
|
||||
|
||||
def get_cluster_id(self):
|
||||
"""Identifier of the kind cluster this deployment attaches to.
|
||||
|
||||
Shared across deployments that join the same kind cluster. Used
|
||||
for the kube-config context name (`kind-{cluster-id}`) and for
|
||||
kind cluster lifecycle ops.
|
||||
"""
|
||||
return self.id
|
||||
|
||||
def get_deployment_id(self):
|
||||
"""Identifier of this particular deployment's k8s resources.
|
||||
|
||||
Distinct per deployment even when multiple deployments share a
|
||||
cluster. Used as compose_project_name → app_name → prefix for
|
||||
all k8s resource names (PVs, ConfigMaps, Deployments, …).
|
||||
|
||||
Backward compat: for deployment.yml files written before this
|
||||
field existed, falls back to cluster-id so existing on-disk
|
||||
resource names remain stable (no PV renames, no re-bind).
|
||||
"""
|
||||
return self.deployment_id
|
||||
|
||||
def init(self, dir: Path):
|
||||
self.deployment_dir = dir.absolute()
|
||||
self.spec = Spec()
|
||||
|
|
@ -60,6 +80,12 @@ class DeploymentContext:
|
|||
if deployment_file_path.exists():
|
||||
obj = get_yaml().load(open(deployment_file_path, "r"))
|
||||
self.id = obj[constants.cluster_id_key]
|
||||
# Fallback to cluster-id for deployments created before the
|
||||
# deployment-id field was introduced. Keeps existing resource
|
||||
# names stable across this upgrade.
|
||||
self.deployment_id = obj.get(
|
||||
constants.deployment_id_key, self.id
|
||||
)
|
||||
# Handle the case of a legacy deployment with no file
|
||||
# Code below is intended to match the output from _make_default_cluster_name()
|
||||
# TODO: remove when we no longer need to support legacy deployments
|
||||
|
|
@ -68,6 +94,7 @@ class DeploymentContext:
|
|||
unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None"
|
||||
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
|
||||
self.id = f"{constants.cluster_name_prefix}{hash}"
|
||||
self.deployment_id = self.id
|
||||
|
||||
def modify_yaml(self, file_path: Path, modifier_func):
|
||||
"""Load a YAML, apply a modification function, and write it back."""
|
||||
|
|
|
|||
|
|
@ -887,7 +887,15 @@ def _create_deployment_file(deployment_dir: Path, stack_source: Optional[Path] =
|
|||
# Reuse existing Kind cluster if one exists, otherwise generate a timestamp-based ID
|
||||
existing = _get_existing_kind_cluster()
|
||||
cluster = existing if existing else generate_id("laconic")
|
||||
deployment_content = {constants.cluster_id_key: cluster}
|
||||
# deployment-id is always fresh per `deploy create`, even when
|
||||
# cluster-id is inherited from a running cluster. Keeps each
|
||||
# deployment's k8s resource names (PVs, ConfigMaps, Deployment)
|
||||
# distinct even when multiple deployments share a cluster.
|
||||
deployment_id = generate_id("laconic")
|
||||
deployment_content = {
|
||||
constants.cluster_id_key: cluster,
|
||||
constants.deployment_id_key: deployment_id,
|
||||
}
|
||||
if stack_source:
|
||||
deployment_content["stack-source"] = str(stack_source)
|
||||
with open(deployment_file_path, "w") as output_file:
|
||||
|
|
|
|||
|
|
@ -129,27 +129,34 @@ class K8sDeployer(Deployer):
|
|||
return
|
||||
self.deployment_dir = deployment_context.deployment_dir
|
||||
self.deployment_context = deployment_context
|
||||
# kind cluster name comes from cluster-id — which kind cluster this
|
||||
# deployment attaches to. Shared across deployments that join the
|
||||
# same cluster. compose_project_name is kept as a parameter for
|
||||
# interface compatibility with the compose deployer path.
|
||||
cluster_id = deployment_context.get_cluster_id()
|
||||
deployment_id = deployment_context.get_deployment_id()
|
||||
self.kind_cluster_name = (
|
||||
deployment_context.spec.get_kind_cluster_name() or compose_project_name
|
||||
)
|
||||
# Use spec namespace if provided, otherwise derive from cluster-id
|
||||
self.k8s_namespace = (
|
||||
deployment_context.spec.get_namespace() or f"laconic-{compose_project_name}"
|
||||
deployment_context.spec.get_kind_cluster_name() or cluster_id
|
||||
)
|
||||
self.cluster_info = ClusterInfo()
|
||||
# stack.name may be an absolute path (from spec "stack:" key after
|
||||
# path resolution). Extract just the directory basename for labels.
|
||||
raw_name = deployment_context.stack.name if deployment_context else ""
|
||||
stack_name = Path(raw_name).name if raw_name else ""
|
||||
# Use spec namespace if provided, otherwise derive from stack name
|
||||
# Namespace: spec override wins; else derive from stack name; else
|
||||
# fall back to deployment-id. (On older deployment.yml files without
|
||||
# deployment-id, get_deployment_id() returns cluster-id — same as
|
||||
# the pre-decouple behavior.)
|
||||
self.k8s_namespace = deployment_context.spec.get_namespace() or (
|
||||
f"laconic-{stack_name}" if stack_name else f"laconic-{compose_project_name}"
|
||||
f"laconic-{stack_name}" if stack_name else f"laconic-{deployment_id}"
|
||||
)
|
||||
self.cluster_info = ClusterInfo()
|
||||
# app_name comes from deployment-id so each deployment owns its own
|
||||
# k8s resource names, even when multiple deployments share a cluster.
|
||||
self.cluster_info.int(
|
||||
compose_files,
|
||||
compose_env_file,
|
||||
compose_project_name,
|
||||
deployment_id,
|
||||
deployment_context.spec,
|
||||
stack_name=stack_name,
|
||||
)
|
||||
|
|
|
|||
|
|
@ -147,7 +147,13 @@ deployment_spec_file=${test_deployment_dir}/spec.yml
|
|||
sed -i 's/^secrets: {}$/secrets:\n test-secret:\n - TEST_SECRET_KEY/' ${deployment_spec_file}
|
||||
|
||||
# Get the deployment ID and namespace for kubectl queries
|
||||
deployment_id=$(cat ${test_deployment_dir}/deployment.yml | cut -d ' ' -f 2)
|
||||
# deployment-id is what flows into app_name → resource name prefix.
|
||||
# Fall back to cluster-id for deployment.yml files written before the
|
||||
# deployment-id field existed (pre-decouple compatibility).
|
||||
deployment_id=$(awk '/^deployment-id:/ {print $2; exit}' ${test_deployment_dir}/deployment.yml)
|
||||
if [ -z "$deployment_id" ]; then
|
||||
deployment_id=$(awk '/^cluster-id:/ {print $2; exit}' ${test_deployment_dir}/deployment.yml)
|
||||
fi
|
||||
# Namespace is derived from stack name: laconic-{stack_name}
|
||||
deployment_ns="laconic-test"
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue