test: verify label-based stop and --delete-namespace behavior

Update run-deploy-test.sh for the new down() semantics:
- After stop --delete-volumes (without --delete-namespace), assert
  that the namespace stays Active and that no stack-labeled
  Deployments, Services, ConfigMaps, Secrets, or PVCs remain.
- Drop the 120s wait loop for namespace termination — not needed
  since stop no longer terminates the namespace.
- Exercise the new --delete-namespace flag at end-of-test teardown
  and assert the namespace is actually gone (see the sketch below).
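
A rough sketch of the flow under test (illustrative only: laconic-so
stands in for the built $TEST_TARGET_SO binary, and <dir>/<ns> are
placeholders for the test deployment dir and namespace):

    # stop keeps the namespace; labeled resources and volumes go away
    laconic-so deployment --dir <dir> stop --delete-volumes --skip-cluster-management
    kubectl get namespace <ns> -o jsonpath='{.status.phase}'   # expect "Active"

    # stop with --delete-namespace is a full teardown
    laconic-so deployment --dir <dir> stop --delete-volumes --delete-namespace --skip-cluster-management
    kubectl get namespace <ns>   # expect NotFound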

Rename delete_cluster_exit -> cleanup_and_exit and have it do a
full teardown (volumes + namespace) so failed CI runs don't leak
state between jobs. Add assert_ns_phase and
assert_no_labeled_resources helpers for the new assertions.
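
For reference, the helpers are driven like this in the script (both
funnel failures through cleanup_and_exit, and the label filter is
app.kubernetes.io/stack=test):

    assert_ns_phase "Active"
    for kind in deployment service configmap secret pvc; do
      assert_no_labeled_resources "$kind"
    done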

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
pull/743/head
Prathamesh Musale 2026-04-16 04:31:46 +00:00
parent cf2269ebdc
commit 98ad60ca03
1 changed file with 63 additions and 31 deletions

run-deploy-test.sh

@@ -23,7 +23,7 @@ wait_for_pods_started () {
   done
   # Timed out, error exit
   echo "waiting for pods to start: FAILED"
-  delete_cluster_exit
+  cleanup_and_exit
 }
 
 wait_for_log_output () {
@@ -42,15 +42,40 @@ wait_for_log_output () {
   done
   # Timed out, error exit
   echo "waiting for pods log content: FAILED"
-  delete_cluster_exit
+  cleanup_and_exit
 }
 
-delete_cluster_exit () {
-  $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
+cleanup_and_exit () {
+  # Full teardown so CI runners don't leak namespaces/PVs between runs.
+  $TEST_TARGET_SO deployment --dir $test_deployment_dir \
+    stop --delete-volumes --delete-namespace --skip-cluster-management || true
   exit 1
 }
 
+assert_ns_phase () {
+  local expected=$1
+  local phase
+  phase=$(kubectl get namespace ${deployment_ns} -o jsonpath='{.status.phase}' 2>/dev/null || echo "Missing")
+  if [ "$phase" != "$expected" ]; then
+    echo "namespace phase test: FAILED (expected ${expected}, got ${phase})"
+    cleanup_and_exit
+  fi
+}
+
+# Count labeled resources in the deployment namespace. Exit 1 on mismatch.
+# Usage: assert_no_labeled_resources <kind>
+assert_no_labeled_resources () {
+  local kind=$1
+  local count
+  count=$(kubectl get ${kind} -n ${deployment_ns} \
+    -l app.kubernetes.io/stack=test --no-headers 2>/dev/null | wc -l)
+  if [ "$count" -ne 0 ]; then
+    echo "labeled cleanup test: FAILED (${kind} still present: ${count})"
+    cleanup_and_exit
+  fi
+}
+
 # Note: eventually this test should be folded into ../deploy/
 # but keeping it separate for now for convenience
 TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
@@ -130,7 +155,7 @@ if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then
 else
   echo "deployment logs test: FAILED"
   echo "$log_output_3"
-  delete_cluster_exit
+  cleanup_and_exit
 fi
 
 # Check the config variable CERC_TEST_PARAM_1 was passed correctly
@@ -138,7 +163,7 @@ if [[ "$log_output_3" == *"Test-param-1: PASSED"* ]]; then
   echo "deployment config test: passed"
 else
   echo "deployment config test: FAILED"
-  delete_cluster_exit
+  cleanup_and_exit
 fi
 
 # Check the config variable CERC_TEST_PARAM_2 was passed correctly from the compose file
@@ -155,7 +180,7 @@ if [[ "$log_output_4" == *"/config/test_config:"* ]] && [[ "$log_output_4" == *"
   echo "deployment ConfigMap test: passed"
 else
   echo "deployment ConfigMap test: FAILED"
-  delete_cluster_exit
+  cleanup_and_exit
 fi
 
 # Check that the bind-mount volume is mounted.
@@ -165,7 +190,7 @@ if [[ "$log_output_5" == *"/data: MOUNTED"* ]]; then
 else
   echo "deployment bind volumes test: FAILED"
   echo "$log_output_5"
-  delete_cluster_exit
+  cleanup_and_exit
 fi
 
 # Check that the provisioner managed volume is mounted.
@@ -175,7 +200,7 @@ if [[ "$log_output_6" == *"/data2: MOUNTED"* ]]; then
 else
   echo "deployment provisioner volumes test: FAILED"
   echo "$log_output_6"
-  delete_cluster_exit
+  cleanup_and_exit
 fi
 
 # --- New feature tests: namespace, labels, jobs, secrets ---
@@ -187,7 +212,7 @@ if [ "$ns_pod_count" -gt 0 ]; then
 else
   echo "namespace isolation test: FAILED"
   echo "Expected pod in namespace ${deployment_ns}"
-  delete_cluster_exit
+  cleanup_and_exit
 fi
 
 # Check that the stack label is set on the pod
@@ -196,7 +221,7 @@ if [ "$stack_label_count" -gt 0 ]; then
   echo "stack label test: passed"
 else
   echo "stack label test: FAILED"
-  delete_cluster_exit
+  cleanup_and_exit
 fi
 
 # Check that the job completed successfully
@@ -212,7 +237,7 @@ if [ "$job_status" == "1" ]; then
 else
   echo "job completion test: FAILED"
   echo "Job status.succeeded: ${job_status}"
-  delete_cluster_exit
+  cleanup_and_exit
 fi
 
 # Check that the secrets spec results in an envFrom secretRef on the pod
@@ -223,25 +248,24 @@ if [ "$secret_ref" == "test-secret" ]; then
 else
   echo "secrets envFrom test: FAILED"
   echo "Expected secretRef 'test-secret', got: ${secret_ref}"
-  delete_cluster_exit
+  cleanup_and_exit
 fi
 
-# Stop then start again and check the volume was preserved.
-# Use --skip-cluster-management to reuse the existing kind cluster instead of
-# destroying and recreating it (which fails on CI runners due to stale etcd/certs
-# and cgroup detection issues).
-# Use --delete-volumes to clear PVs so fresh PVCs can bind on restart.
-# Bind-mount data survives on the host filesystem; provisioner volumes are recreated fresh.
+# Stop with --delete-volumes (but not --delete-namespace) and verify:
+# - namespace stays Active (no termination race on restart)
+# - stack-labeled workloads are gone
+# - bind-mount data on the host survives; provisioner volumes are recreated
 $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes --skip-cluster-management
-# Wait for the namespace to be fully terminated before restarting.
-# Without this, 'start' fails with 403 Forbidden because the namespace
-# is still in Terminating state.
-for i in {1..60}; do
-  if ! kubectl get namespace ${deployment_ns} 2>/dev/null | grep -q .; then
-    break
-  fi
-  sleep 2
-done
+
+assert_ns_phase "Active"
+echo "stop preserves namespace test: passed"
+
+for kind in deployment service configmap secret pvc; do
+  assert_no_labeled_resources "$kind"
+done
+echo "stop cleans labeled resources test: passed"
+
+# Restart — no wait needed, the namespace is still Active.
 $TEST_TARGET_SO deployment --dir $test_deployment_dir start --skip-cluster-management
 wait_for_pods_started
 wait_for_log_output
@@ -252,7 +276,7 @@ if [[ "$log_output_10" == *"/data filesystem is old"* ]]; then
   echo "Retain bind volumes test: passed"
 else
   echo "Retain bind volumes test: FAILED"
-  delete_cluster_exit
+  cleanup_and_exit
 fi
 
 # Provisioner volumes are destroyed when PVs are deleted (--delete-volumes on stop).
@@ -263,9 +287,17 @@ if [[ "$log_output_11" == *"/data2 filesystem is fresh"* ]]; then
   echo "Fresh provisioner volumes test: passed"
 else
   echo "Fresh provisioner volumes test: FAILED"
-  delete_cluster_exit
+  cleanup_and_exit
 fi
 
-# Stop and clean up
-$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
+# Full teardown: --delete-namespace nukes the namespace after labeled cleanup.
+# Verify the namespace is actually gone.
+$TEST_TARGET_SO deployment --dir $test_deployment_dir \
+  stop --delete-volumes --delete-namespace --skip-cluster-management
+if kubectl get namespace ${deployment_ns} >/dev/null 2>&1; then
+  echo "delete-namespace test: FAILED (namespace still present)"
+  exit 1
+fi
+echo "delete-namespace test: passed"
 echo "Test passed"