fix: imagePullPolicy for kind, job images, duplicate registry call, test namespace

- deploy_k8s.py: default imagePullPolicy to IfNotPresent for kind
  (local images loaded via kind load, not pulled from registry)
- cluster_info.py: add job images to image_set so they're loaded into kind
- deploy_k8s.py: remove duplicate create_registry_secret call (merge artifact)
- deploy_k8s.py: fix indentation in run_job job_pull_policy (replace_all damage)
- tests/k8s-deploy: update namespace from laconic-{id} to laconic-{stack_name}
  to match the new stack-derived namespace scheme from wd-a7b

All 15 k8s deploy e2e tests pass.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
pull/740/head
A. F. Dudley 2026-04-01 23:34:51 +00:00
parent 66da312f67
commit 87761c7041
3 changed files with 19 additions and 13 deletions

View File

@@ -107,6 +107,8 @@ class ClusterInfo:
def init_jobs(self, job_files: List[str]): def init_jobs(self, job_files: List[str]):
"""Initialize parsed job YAML map from job compose files.""" """Initialize parsed job YAML map from job compose files."""
self.parsed_job_yaml_map = parsed_pod_files_map_from_file_names(job_files) self.parsed_job_yaml_map = parsed_pod_files_map_from_file_names(job_files)
# Add job images to the image set so they're loaded into kind
self.image_set.update(images_for_deployment(job_files))
if opts.o.debug: if opts.o.debug:
print(f"Parsed job yaml map: {self.parsed_job_yaml_map}") print(f"Parsed job yaml map: {self.parsed_job_yaml_map}")

View File

@@ -563,9 +563,10 @@ class K8sDeployer(Deployer):
print("No pods defined, skipping Deployment creation") print("No pods defined, skipping Deployment creation")
return return
# Process compose files into Deployments (one per pod file) # Process compose files into Deployments (one per pod file)
# image-pull-policy from spec, default Always (production). # image-pull-policy from spec. Default IfNotPresent for kind (local
# Testing specs use IfNotPresent so kind-loaded local images are used. # images are loaded via `kind load`), Always for production k8s.
pull_policy = self.cluster_info.spec.get("image-pull-policy", "Always") default_policy = "IfNotPresent" if self.is_kind() else "Always"
pull_policy = self.cluster_info.spec.get("image-pull-policy", default_policy)
deployments = self.cluster_info.get_deployments(image_pull_policy=pull_policy) deployments = self.cluster_info.get_deployments(image_pull_policy=pull_policy)
for deployment in deployments: for deployment in deployments:
# Apply image overrides if provided # Apply image overrides if provided
@@ -664,7 +665,8 @@ class K8sDeployer(Deployer):
def _create_jobs(self): def _create_jobs(self):
# Process job compose files into k8s Jobs # Process job compose files into k8s Jobs
jobs = self.cluster_info.get_jobs(image_pull_policy="Always") job_pull_policy = "IfNotPresent" if self.is_kind() else "Always"
jobs = self.cluster_info.get_jobs(image_pull_policy=job_pull_policy)
for job in jobs: for job in jobs:
if opts.o.debug: if opts.o.debug:
print(f"Sending this job: {job}") print(f"Sending this job: {job}")
@@ -844,7 +846,6 @@ class K8sDeployer(Deployer):
create_registry_secret( create_registry_secret(
self.cluster_info.spec, self.cluster_info.app_name, self.k8s_namespace self.cluster_info.spec, self.cluster_info.app_name, self.k8s_namespace
) )
create_registry_secret(self.cluster_info.spec, self.cluster_info.app_name, self.k8s_namespace)
self._create_volume_data() self._create_volume_data()
self._create_external_services() self._create_external_services()
@@ -1126,7 +1127,8 @@ class K8sDeployer(Deployer):
else: else:
# Non-Helm path: create job from ClusterInfo # Non-Helm path: create job from ClusterInfo
self.connect_api() self.connect_api()
jobs = self.cluster_info.get_jobs(image_pull_policy="Always") job_pull_policy = "IfNotPresent" if self.is_kind() else "Always"
jobs = self.cluster_info.get_jobs(image_pull_policy=job_pull_policy)
# Find the matching job by name # Find the matching job by name
target_name = f"{self.cluster_info.app_name}-job-{job_name}" target_name = f"{self.cluster_info.app_name}-job-{job_name}"
matched_job = None matched_job = None

View File

@@ -111,8 +111,10 @@ echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/configmaps/te
deployment_spec_file=${test_deployment_dir}/spec.yml deployment_spec_file=${test_deployment_dir}/spec.yml
sed -i 's/^secrets: {}$/secrets:\n test-secret:\n - TEST_SECRET_KEY/' ${deployment_spec_file} sed -i 's/^secrets: {}$/secrets:\n test-secret:\n - TEST_SECRET_KEY/' ${deployment_spec_file}
# Get the deployment ID for kubectl queries # Get the deployment ID and namespace for kubectl queries
deployment_id=$(cat ${test_deployment_dir}/deployment.yml | cut -d ' ' -f 2) deployment_id=$(cat ${test_deployment_dir}/deployment.yml | cut -d ' ' -f 2)
# Namespace is derived from stack name: laconic-{stack_name}
deployment_ns="laconic-test"
echo "deploy create output file test: passed" echo "deploy create output file test: passed"
# Try to start the deployment (--perform-cluster-management needed on first start # Try to start the deployment (--perform-cluster-management needed on first start
@@ -179,17 +181,17 @@ fi
# --- New feature tests: namespace, labels, jobs, secrets --- # --- New feature tests: namespace, labels, jobs, secrets ---
# Check that the pod is in the deployment-specific namespace (not default) # Check that the pod is in the deployment-specific namespace (not default)
ns_pod_count=$(kubectl get pods -n laconic-${deployment_id} -l app=${deployment_id} --no-headers 2>/dev/null | wc -l) ns_pod_count=$(kubectl get pods -n ${deployment_ns} -l app=${deployment_id} --no-headers 2>/dev/null | wc -l)
if [ "$ns_pod_count" -gt 0 ]; then if [ "$ns_pod_count" -gt 0 ]; then
echo "namespace isolation test: passed" echo "namespace isolation test: passed"
else else
echo "namespace isolation test: FAILED" echo "namespace isolation test: FAILED"
echo "Expected pod in namespace laconic-${deployment_id}" echo "Expected pod in namespace ${deployment_ns}"
delete_cluster_exit delete_cluster_exit
fi fi
# Check that the stack label is set on the pod # Check that the stack label is set on the pod
stack_label_count=$(kubectl get pods -n laconic-${deployment_id} -l app.kubernetes.io/stack=test --no-headers 2>/dev/null | wc -l) stack_label_count=$(kubectl get pods -n ${deployment_ns} -l app.kubernetes.io/stack=test --no-headers 2>/dev/null | wc -l)
if [ "$stack_label_count" -gt 0 ]; then if [ "$stack_label_count" -gt 0 ]; then
echo "stack label test: passed" echo "stack label test: passed"
else else
@@ -199,7 +201,7 @@ fi
# Check that the job completed successfully # Check that the job completed successfully
for i in {1..30}; do for i in {1..30}; do
job_status=$(kubectl get job ${deployment_id}-job-test-job -n laconic-${deployment_id} -o jsonpath='{.status.succeeded}' 2>/dev/null || true) job_status=$(kubectl get job ${deployment_id}-job-test-job -n ${deployment_ns} -o jsonpath='{.status.succeeded}' 2>/dev/null || true)
if [ "$job_status" == "1" ]; then if [ "$job_status" == "1" ]; then
break break
fi fi
@@ -214,7 +216,7 @@ else
fi fi
# Check that the secrets spec results in an envFrom secretRef on the pod # Check that the secrets spec results in an envFrom secretRef on the pod
secret_ref=$(kubectl get pod -n laconic-${deployment_id} -l app=${deployment_id} \ secret_ref=$(kubectl get pod -n ${deployment_ns} -l app=${deployment_id} \
-o jsonpath='{.items[0].spec.containers[0].envFrom[?(@.secretRef.name=="test-secret")].secretRef.name}' 2>/dev/null || true) -o jsonpath='{.items[0].spec.containers[0].envFrom[?(@.secretRef.name=="test-secret")].secretRef.name}' 2>/dev/null || true)
if [ "$secret_ref" == "test-secret" ]; then if [ "$secret_ref" == "test-secret" ]; then
echo "secrets envFrom test: passed" echo "secrets envFrom test: passed"
@@ -235,7 +237,7 @@ $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes --sk
# Without this, 'start' fails with 403 Forbidden because the namespace # Without this, 'start' fails with 403 Forbidden because the namespace
# is still in Terminating state. # is still in Terminating state.
for i in {1..60}; do for i in {1..60}; do
if ! kubectl get namespace laconic-${deployment_id} 2>/dev/null | grep -q .; then if ! kubectl get namespace ${deployment_ns} 2>/dev/null | grep -q .; then
break break
fi fi
sleep 2 sleep 2