wd-a7b: Fix cluster-id and namespace naming
- Replace token_hex cluster IDs with sortable timestamp-based IDs
(laconic-{base62_timestamp}{random_suffix}) via new ids.py module
- Check for existing Kind cluster before generating a new cluster-id
- Derive k8s namespace from stack name instead of compose_project_name
(e.g. laconic-dumpster instead of laconic-<random>)
- Plumb namespace through to secret generation instead of hardcoding
'default'
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
pull/740/head
parent
8a7491d3e0
commit
36c37d2bde
|
|
@ -24,11 +24,13 @@ from typing import List, Optional
|
|||
import random
|
||||
from shutil import copy, copyfile, copytree, rmtree
|
||||
from secrets import token_hex
|
||||
import subprocess
|
||||
import sys
|
||||
import filecmp
|
||||
import tempfile
|
||||
|
||||
from stack_orchestrator import constants
|
||||
from stack_orchestrator.ids import generate_id
|
||||
from stack_orchestrator.opts import opts
|
||||
from stack_orchestrator.util import (
|
||||
get_stack_path,
|
||||
|
|
@ -513,7 +515,9 @@ def init_operation(
|
|||
GENERATE_TOKEN_PATTERN = re.compile(r"\$generate:(\w+):(\d+)\$")
|
||||
|
||||
|
||||
def _generate_and_store_secrets(config_vars: dict, deployment_name: str):
|
||||
def _generate_and_store_secrets(
|
||||
config_vars: dict, deployment_name: str, namespace: str = "default"
|
||||
):
|
||||
"""Generate secrets for $generate:...$ tokens and store in K8s Secret.
|
||||
|
||||
Called by `deploy create` - generates fresh secrets and stores them.
|
||||
|
|
@ -555,7 +559,6 @@ def _generate_and_store_secrets(config_vars: dict, deployment_name: str):
|
|||
|
||||
v1 = client.CoreV1Api()
|
||||
secret_name = f"{deployment_name}-generated-secrets"
|
||||
namespace = "default"
|
||||
|
||||
secret_data = {k: base64.b64encode(v.encode()).decode() for k, v in secrets.items()}
|
||||
k8s_secret = client.V1Secret(
|
||||
|
|
@ -659,7 +662,10 @@ def create_registry_secret(spec: Spec, deployment_name: str) -> Optional[str]:
|
|||
|
||||
|
||||
def _write_config_file(
|
||||
spec_file: Path, config_env_file: Path, deployment_name: Optional[str] = None
|
||||
spec_file: Path,
|
||||
config_env_file: Path,
|
||||
deployment_name: Optional[str] = None,
|
||||
namespace: str = "default",
|
||||
):
|
||||
spec_content = get_parsed_deployment_spec(spec_file)
|
||||
config_vars = spec_content.get("config", {}) or {}
|
||||
|
|
@ -671,7 +677,7 @@ def _write_config_file(
|
|||
for v in config_vars.values()
|
||||
)
|
||||
if has_generate_tokens:
|
||||
_generate_and_store_secrets(config_vars, deployment_name)
|
||||
_generate_and_store_secrets(config_vars, deployment_name, namespace)
|
||||
|
||||
# Write non-secret config to config.env (exclude $generate:...$ tokens)
|
||||
with open(config_env_file, "w") as output_file:
|
||||
|
|
@ -697,9 +703,31 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path):
|
|||
copy(path, os.path.join(directory, os.path.basename(path)))
|
||||
|
||||
|
||||
def _get_existing_kind_cluster() -> Optional[str]:
    """Return the name of an existing Kind cluster, or None.

    Runs `kind get clusters` and picks the first cluster listed. Returns
    None when the kind binary is missing, the command times out or fails,
    or no clusters exist.
    """
    try:
        completed = subprocess.run(
            ["kind", "get", "clusters"],
            capture_output=True,
            text=True,
            timeout=10,
        )
    except (FileNotFoundError, subprocess.TimeoutExpired):
        # kind not installed, or unresponsive — treat as "no cluster"
        return None
    if completed.returncode != 0:
        return None
    names = [
        line.strip()
        for line in completed.stdout.strip().splitlines()
        if line.strip()
    ]
    return names[0] if names else None
|
||||
|
||||
|
||||
def _create_deployment_file(deployment_dir: Path, stack_source: Optional[Path] = None):
|
||||
deployment_file_path = deployment_dir.joinpath(constants.deployment_file_name)
|
||||
cluster = f"{constants.cluster_name_prefix}{token_hex(8)}"
|
||||
# Reuse existing Kind cluster if one exists, otherwise generate a timestamp-based ID
|
||||
existing = _get_existing_kind_cluster()
|
||||
cluster = existing if existing else generate_id("laconic")
|
||||
deployment_content = {constants.cluster_id_key: cluster}
|
||||
if stack_source:
|
||||
deployment_content["stack-source"] = str(stack_source)
|
||||
|
|
@ -953,8 +981,13 @@ def _write_deployment_files(
|
|||
# Use stack_name as deployment_name for K8s secret naming
|
||||
# Extract just the name part if stack_name is a path ("path/to/stack" -> "stack")
|
||||
deployment_name = Path(stack_name).name.replace("_", "-")
|
||||
# Derive namespace from spec or stack name, matching deploy_k8s logic
|
||||
namespace = parsed_spec.get_namespace() or f"laconic-{deployment_name}"
|
||||
_write_config_file(
|
||||
spec_file, target_dir.joinpath(constants.config_file_name), deployment_name
|
||||
spec_file,
|
||||
target_dir.joinpath(constants.config_file_name),
|
||||
deployment_name,
|
||||
namespace=namespace,
|
||||
)
|
||||
|
||||
# Copy any k8s config file into the target dir
|
||||
|
|
@ -1032,12 +1065,8 @@ def _write_deployment_files(
|
|||
for configmap in parsed_spec.get_configmaps():
|
||||
source_config_dir = resolve_config_dir(stack_name, configmap)
|
||||
if os.path.exists(source_config_dir):
|
||||
destination_config_dir = target_dir.joinpath(
|
||||
"configmaps", configmap
|
||||
)
|
||||
copytree(
|
||||
source_config_dir, destination_config_dir, dirs_exist_ok=True
|
||||
)
|
||||
destination_config_dir = target_dir.joinpath("configmaps", configmap)
|
||||
copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
|
||||
|
||||
# Copy the job files into the target dir
|
||||
jobs = get_job_list(parsed_stack)
|
||||
|
|
|
|||
|
|
@ -82,7 +82,14 @@ class ClusterInfo:
|
|||
def __init__(self) -> None:
|
||||
self.parsed_job_yaml_map = {}
|
||||
|
||||
def int(self, pod_files: List[str], compose_env_file, deployment_name, spec: Spec, stack_name=""):
|
||||
def int(
|
||||
self,
|
||||
pod_files: List[str],
|
||||
compose_env_file,
|
||||
deployment_name,
|
||||
spec: Spec,
|
||||
stack_name="",
|
||||
):
|
||||
self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
|
||||
# Find the set of images in the pods
|
||||
self.image_set = images_for_deployment(pod_files)
|
||||
|
|
@ -292,8 +299,7 @@ class ClusterInfo:
|
|||
|
||||
# Per-volume resources override global, which overrides default.
|
||||
vol_resources = (
|
||||
self.spec.get_volume_resources_for(volume_name)
|
||||
or global_resources
|
||||
self.spec.get_volume_resources_for(volume_name) or global_resources
|
||||
)
|
||||
|
||||
labels = {
|
||||
|
|
@ -395,8 +401,7 @@ class ClusterInfo:
|
|||
continue
|
||||
|
||||
vol_resources = (
|
||||
self.spec.get_volume_resources_for(volume_name)
|
||||
or global_resources
|
||||
self.spec.get_volume_resources_for(volume_name) or global_resources
|
||||
)
|
||||
if self.spec.is_kind_deployment():
|
||||
host_path = client.V1HostPathVolumeSource(
|
||||
|
|
@ -531,9 +536,7 @@ class ClusterInfo:
|
|||
if self.spec.get_image_registry() is not None
|
||||
else image
|
||||
)
|
||||
volume_mounts = volume_mounts_for_service(
|
||||
parsed_yaml_map, service_name
|
||||
)
|
||||
volume_mounts = volume_mounts_for_service(parsed_yaml_map, service_name)
|
||||
# Handle command/entrypoint from compose file
|
||||
# In docker-compose: entrypoint -> k8s command, command -> k8s args
|
||||
container_command = None
|
||||
|
|
@ -581,7 +584,9 @@ class ClusterInfo:
|
|||
volume_mounts=volume_mounts,
|
||||
security_context=client.V1SecurityContext(
|
||||
privileged=self.spec.get_privileged(),
|
||||
run_as_user=int(service_info["user"]) if "user" in service_info else None,
|
||||
run_as_user=int(service_info["user"])
|
||||
if "user" in service_info
|
||||
else None,
|
||||
capabilities=client.V1Capabilities(
|
||||
add=self.spec.get_capabilities()
|
||||
)
|
||||
|
|
@ -595,19 +600,17 @@ class ClusterInfo:
|
|||
svc_labels = service_info.get("labels", {})
|
||||
if isinstance(svc_labels, list):
|
||||
# docker-compose labels can be a list of "key=value"
|
||||
svc_labels = dict(
|
||||
item.split("=", 1) for item in svc_labels
|
||||
svc_labels = dict(item.split("=", 1) for item in svc_labels)
|
||||
is_init = str(svc_labels.get("laconic.init-container", "")).lower() in (
|
||||
"true",
|
||||
"1",
|
||||
"yes",
|
||||
)
|
||||
is_init = str(
|
||||
svc_labels.get("laconic.init-container", "")
|
||||
).lower() in ("true", "1", "yes")
|
||||
if is_init:
|
||||
init_containers.append(container)
|
||||
else:
|
||||
containers.append(container)
|
||||
volumes = volumes_for_pod_files(
|
||||
parsed_yaml_map, self.spec, self.app_name
|
||||
)
|
||||
volumes = volumes_for_pod_files(parsed_yaml_map, self.spec, self.app_name)
|
||||
return containers, init_containers, services, volumes
|
||||
|
||||
# TODO: put things like image pull policy into an object-scope struct
|
||||
|
|
@ -704,7 +707,14 @@ class ClusterInfo:
|
|||
kind="Deployment",
|
||||
metadata=client.V1ObjectMeta(
|
||||
name=f"{self.app_name}-deployment",
|
||||
labels={"app": self.app_name, **({"app.kubernetes.io/stack": self.stack_name} if self.stack_name else {})},
|
||||
labels={
|
||||
"app": self.app_name,
|
||||
**(
|
||||
{"app.kubernetes.io/stack": self.stack_name}
|
||||
if self.stack_name
|
||||
else {}
|
||||
),
|
||||
},
|
||||
),
|
||||
spec=spec,
|
||||
)
|
||||
|
|
@ -732,8 +742,8 @@ class ClusterInfo:
|
|||
for job_file in self.parsed_job_yaml_map:
|
||||
# Build containers for this single job file
|
||||
single_job_map = {job_file: self.parsed_job_yaml_map[job_file]}
|
||||
containers, init_containers, _services, volumes = (
|
||||
self._build_containers(single_job_map, image_pull_policy)
|
||||
containers, init_containers, _services, volumes = self._build_containers(
|
||||
single_job_map, image_pull_policy
|
||||
)
|
||||
|
||||
# Derive job name from file path: docker-compose-<name>.yml -> <name>
|
||||
|
|
@ -751,12 +761,14 @@ class ClusterInfo:
|
|||
# picked up by pods_in_deployment() which queries app={app_name}.
|
||||
pod_labels = {
|
||||
"app": f"{self.app_name}-job",
|
||||
**({"app.kubernetes.io/stack": self.stack_name} if self.stack_name else {}),
|
||||
**(
|
||||
{"app.kubernetes.io/stack": self.stack_name}
|
||||
if self.stack_name
|
||||
else {}
|
||||
),
|
||||
}
|
||||
template = client.V1PodTemplateSpec(
|
||||
metadata=client.V1ObjectMeta(
|
||||
labels=pod_labels
|
||||
),
|
||||
metadata=client.V1ObjectMeta(labels=pod_labels),
|
||||
spec=client.V1PodSpec(
|
||||
containers=containers,
|
||||
init_containers=init_containers or None,
|
||||
|
|
@ -769,7 +781,14 @@ class ClusterInfo:
|
|||
template=template,
|
||||
backoff_limit=0,
|
||||
)
|
||||
job_labels = {"app": self.app_name, **({"app.kubernetes.io/stack": self.stack_name} if self.stack_name else {})}
|
||||
job_labels = {
|
||||
"app": self.app_name,
|
||||
**(
|
||||
{"app.kubernetes.io/stack": self.stack_name}
|
||||
if self.stack_name
|
||||
else {}
|
||||
),
|
||||
}
|
||||
job = client.V1Job(
|
||||
api_version="batch/v1",
|
||||
kind="Job",
|
||||
|
|
|
|||
|
|
@ -122,14 +122,18 @@ class K8sDeployer(Deployer):
|
|||
return
|
||||
self.deployment_dir = deployment_context.deployment_dir
|
||||
self.deployment_context = deployment_context
|
||||
self.kind_cluster_name = deployment_context.spec.get_kind_cluster_name() or compose_project_name
|
||||
# Use spec namespace if provided, otherwise derive from cluster-id
|
||||
self.k8s_namespace = deployment_context.spec.get_namespace() or f"laconic-{compose_project_name}"
|
||||
self.cluster_info = ClusterInfo()
|
||||
self.kind_cluster_name = (
|
||||
deployment_context.spec.get_kind_cluster_name() or compose_project_name
|
||||
)
|
||||
# stack.name may be an absolute path (from spec "stack:" key after
|
||||
# path resolution). Extract just the directory basename for labels.
|
||||
raw_name = deployment_context.stack.name if deployment_context else ""
|
||||
stack_name = Path(raw_name).name if raw_name else ""
|
||||
# Use spec namespace if provided, otherwise derive from stack name
|
||||
self.k8s_namespace = deployment_context.spec.get_namespace() or (
|
||||
f"laconic-{stack_name}" if stack_name else f"laconic-{compose_project_name}"
|
||||
)
|
||||
self.cluster_info = ClusterInfo()
|
||||
self.cluster_info.int(
|
||||
compose_files,
|
||||
compose_env_file,
|
||||
|
|
@ -232,7 +236,8 @@ class K8sDeployer(Deployer):
|
|||
for job in jobs.items:
|
||||
print(f"Deleting Job {job.metadata.name}")
|
||||
self.batch_api.delete_namespaced_job(
|
||||
name=job.metadata.name, namespace=ns,
|
||||
name=job.metadata.name,
|
||||
namespace=ns,
|
||||
body=client.V1DeleteOptions(propagation_policy="Background"),
|
||||
)
|
||||
except ApiException as e:
|
||||
|
|
@ -555,7 +560,10 @@ class K8sDeployer(Deployer):
|
|||
|
||||
# Call start() hooks — stacks can create additional k8s resources
|
||||
if self.deployment_context:
|
||||
from stack_orchestrator.deploy.deployment_create import call_stack_deploy_start
|
||||
from stack_orchestrator.deploy.deployment_create import (
|
||||
call_stack_deploy_start,
|
||||
)
|
||||
|
||||
call_stack_deploy_start(self.deployment_context)
|
||||
|
||||
def down(self, timeout, volumes, skip_cluster_management):
|
||||
|
|
@ -567,9 +575,7 @@ class K8sDeployer(Deployer):
|
|||
# PersistentVolumes are cluster-scoped (not namespaced), so delete by label
|
||||
if volumes:
|
||||
try:
|
||||
pvs = self.core_api.list_persistent_volume(
|
||||
label_selector=app_label
|
||||
)
|
||||
pvs = self.core_api.list_persistent_volume(label_selector=app_label)
|
||||
for pv in pvs.items:
|
||||
if opts.o.debug:
|
||||
print(f"Deleting PV: {pv.metadata.name}")
|
||||
|
|
@ -713,14 +719,18 @@ class K8sDeployer(Deployer):
|
|||
|
||||
def logs(self, services, tail, follow, stream):
|
||||
self.connect_api()
|
||||
pods = pods_in_deployment(self.core_api, self.cluster_info.app_name, namespace=self.k8s_namespace)
|
||||
pods = pods_in_deployment(
|
||||
self.core_api, self.cluster_info.app_name, namespace=self.k8s_namespace
|
||||
)
|
||||
if len(pods) > 1:
|
||||
print("Warning: more than one pod in the deployment")
|
||||
if len(pods) == 0:
|
||||
log_data = "******* Pods not running ********\n"
|
||||
else:
|
||||
k8s_pod_name = pods[0]
|
||||
containers = containers_in_pod(self.core_api, k8s_pod_name, namespace=self.k8s_namespace)
|
||||
containers = containers_in_pod(
|
||||
self.core_api, k8s_pod_name, namespace=self.k8s_namespace
|
||||
)
|
||||
# If pod not started, logs request below will throw an exception
|
||||
try:
|
||||
log_data = ""
|
||||
|
|
|
|||
|
|
@ -393,7 +393,9 @@ def load_images_into_kind(kind_cluster_name: str, image_set: Set[str]):
|
|||
raise DeployerException(f"kind load docker-image failed: {result}")
|
||||
|
||||
|
||||
def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str, namespace: str = "default"):
|
||||
def pods_in_deployment(
|
||||
core_api: client.CoreV1Api, deployment_name: str, namespace: str = "default"
|
||||
):
|
||||
pods = []
|
||||
pod_response = core_api.list_namespaced_pod(
|
||||
namespace=namespace, label_selector=f"app={deployment_name}"
|
||||
|
|
@ -406,7 +408,9 @@ def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str, namespa
|
|||
return pods
|
||||
|
||||
|
||||
def containers_in_pod(core_api: client.CoreV1Api, pod_name: str, namespace: str = "default") -> List[str]:
|
||||
def containers_in_pod(
|
||||
core_api: client.CoreV1Api, pod_name: str, namespace: str = "default"
|
||||
) -> List[str]:
|
||||
containers: List[str] = []
|
||||
pod_response = cast(
|
||||
client.V1Pod, core_api.read_namespaced_pod(pod_name, namespace=namespace)
|
||||
|
|
|
|||
|
|
@ -170,15 +170,13 @@ class Spec:
|
|||
Returns the per-volume Resources if found, otherwise None.
|
||||
The caller should fall back to get_volume_resources() then the default.
|
||||
"""
|
||||
vol_section = (
|
||||
self.obj.get(constants.resources_key, {}).get(constants.volumes_key, {})
|
||||
vol_section = self.obj.get(constants.resources_key, {}).get(
|
||||
constants.volumes_key, {}
|
||||
)
|
||||
if volume_name not in vol_section:
|
||||
return None
|
||||
entry = vol_section[volume_name]
|
||||
if isinstance(entry, dict) and (
|
||||
"reservations" in entry or "limits" in entry
|
||||
):
|
||||
if isinstance(entry, dict) and ("reservations" in entry or "limits" in entry):
|
||||
return Resources(entry)
|
||||
return None
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,47 @@
|
|||
"""Sortable timestamp-based ID generation for cluster naming.
|
||||
|
||||
Uses base62 encoding with 100ms resolution and a 2024-01-01 epoch
|
||||
to produce compact, sortable IDs like 'laconic-iqE6Za'.
|
||||
|
||||
Format: {prefix}-{timestamp}{random}
|
||||
- timestamp: 5 chars (100ms resolution, ~180 years from 2024)
|
||||
- random: 2 chars (3,844 unique per 100ms slot)
|
||||
"""
|
||||
# Adapted from exophial/src/exophial/ids.py
|
||||
|
||||
import random
|
||||
import time
|
||||
|
||||
# 2024-01-01 00:00:00 UTC in milliseconds
|
||||
EPOCH_2024 = 1704067200000
|
||||
|
||||
# Sortable base62 alphabet (0-9, A-Z, a-z)
|
||||
ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||
|
||||
|
||||
def _base62(n: int) -> str:
|
||||
"""Encode integer as base62 string."""
|
||||
if n == 0:
|
||||
return ALPHABET[0]
|
||||
s = ""
|
||||
while n:
|
||||
n, r = divmod(n, 62)
|
||||
s = ALPHABET[r] + s
|
||||
return s
|
||||
|
||||
|
||||
def _random_suffix(length: int = 2) -> str:
|
||||
"""Generate random base62 suffix."""
|
||||
return "".join(random.choice(ALPHABET) for _ in range(length))
|
||||
|
||||
|
||||
def _timestamp_id() -> str:
|
||||
"""Generate a sortable timestamp ID (100ms resolution, 2024 epoch) with random suffix."""
|
||||
now_ms = int(time.time() * 1000)
|
||||
offset = (now_ms - EPOCH_2024) // 100 # 100ms resolution
|
||||
return f"{_base62(offset)}{_random_suffix()}"
|
||||
|
||||
|
||||
def generate_id(prefix: str) -> str:
|
||||
"""Generate a sortable ID with an arbitrary prefix like 'laconic-iqE6Za'."""
|
||||
return f"{prefix}-{_timestamp_id()}"
|
||||
Loading…
Reference in New Issue