#!/usr/bin/env bash
# Integration test: stack-orchestrator deployment to k8s (kind).
set -e

# Opt-in verbose tracing for CI debugging.
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
  # Dump environment variables for debugging
  echo "Environment variables:"
  env
fi

# Helper functions: TODO move into a separate file
# Poll `deployment ps` until it reports running containers, or time out.
# Polls up to 50 times, 5s apart (~250s budget).
# Globals read: TEST_TARGET_SO, test_deployment_dir.
# On timeout: prints a FAILED marker and exits via cleanup_and_exit.
wait_for_pods_started () {
  local i
  local ps_output
  for i in {1..50}; do
    # Split declaration from assignment so a failing command isn't masked
    # by `local` always returning 0 (SC2155).
    ps_output=$( "$TEST_TARGET_SO" deployment --dir "$test_deployment_dir" ps )
    if [[ "$ps_output" == *"Running containers:"* ]]; then
      # if ready, return
      return
    fi
    # if not ready, wait
    sleep 5
  done
  # Timed out, error exit
  echo "waiting for pods to start: FAILED"
  cleanup_and_exit
}
# Poll `deployment logs` until real log content appears, or time out.
# "Real" means: non-empty and not one of the placeholder messages
# ("No logs available" / "Pods not running").
# Polls up to 50 times, 5s apart. Globals read: TEST_TARGET_SO, test_deployment_dir.
# On timeout: prints a FAILED marker and exits via cleanup_and_exit.
wait_for_log_output () {
  local i
  local log_output
  for i in {1..50}; do
    # Declaration split from assignment so command failure isn't hidden (SC2155).
    log_output=$( "$TEST_TARGET_SO" deployment --dir "$test_deployment_dir" logs )
    if [[ -n "$log_output" && "$log_output" != *"No logs available"* && "$log_output" != *"Pods not running"* ]]; then
      # if ready, return
      return
    fi
    # if not ready, wait
    sleep 5
  done
  # Timed out, error exit
  echo "waiting for pods log content: FAILED"
  cleanup_and_exit
}
# Best-effort full teardown, then exit 1.
# Full teardown so CI runners don't leak namespaces/PVs between runs.
# The `|| true` ensures a failed stop never masks the test failure itself.
cleanup_and_exit () {
  local -a teardown_args=(
    deployment --dir "$test_deployment_dir"
    stop --delete-volumes --delete-namespace --skip-cluster-management
  )
  "$TEST_TARGET_SO" "${teardown_args[@]}" || true
  exit 1
}
# Assert the deployment namespace is in the expected phase.
# Usage: assert_ns_phase <expected-phase>
# Globals read: deployment_ns. A missing namespace reads as phase "Missing".
# On mismatch: prints a FAILED marker and exits via cleanup_and_exit.
assert_ns_phase () {
  local expected=$1
  local phase
  phase=$(kubectl get namespace "${deployment_ns}" -o jsonpath='{.status.phase}' 2>/dev/null || echo "Missing")
  # Guard clause: matching phase means the assertion holds.
  [ "$phase" = "$expected" ] && return
  echo "namespace phase test: FAILED (expected ${expected}, got ${phase})"
  cleanup_and_exit
}
# Count labeled resources in the deployment namespace. down() is
# synchronous on its own cleanup (waits for PVCs/pods to terminate
# before returning) so callers can assert immediately.
# Usage: assert_no_labeled_resources <kind>
# Globals read: deployment_ns. On leftovers: FAILED marker + cleanup_and_exit.
assert_no_labeled_resources () {
  local kind=$1
  local count
  count=$(kubectl get "${kind}" -n "${deployment_ns}" \
            -l app.kubernetes.io/stack=test --no-headers 2>/dev/null | wc -l)
  # Guard clause: zero remaining resources means the assertion holds.
  if [ "$count" -eq 0 ]; then
    return
  fi
  echo "labeled cleanup test: FAILED (${kind} still present: ${count})"
  cleanup_and_exit
}
# Note: eventually this test should be folded into ../deploy/
# but keeping it separate for now for convenience

# Pick the most recently built package artifact (ls -t sorts by mtime).
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )

# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir

echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( "$TEST_TARGET_SO" version )
echo "Version reported is: ${reported_version_string}"

echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
# ':?' aborts if the variable is somehow empty/unset, so we can never
# run a destructive 'rm -rf' against a bare or wrong path.
rm -rf "${CERC_REPO_BASE_DIR:?}"
mkdir -p "$CERC_REPO_BASE_DIR"
"$TEST_TARGET_SO" --stack test setup-repositories
"$TEST_TARGET_SO" --stack test build-containers
# Test basic stack-orchestrator deploy to k8s
test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml

"$TEST_TARGET_SO" --stack test deploy --deploy-to k8s-kind init --output "$test_deployment_spec" --config CERC_TEST_PARAM_1=PASSED
# Check the file now exists
if [ ! -f "$test_deployment_spec" ]; then
  echo "deploy init test: spec file not present"
  echo "deploy init test: FAILED"
  exit 1
fi
echo "deploy init test: passed"

# Switch to a full path for bind mount.
sed -i "s|^\(\s*test-data-bind:$\)$|\1 ${test_deployment_dir}/data/test-data-bind|" "$test_deployment_spec"

"$TEST_TARGET_SO" --stack test deploy create --spec-file "$test_deployment_spec" --deployment-dir "$test_deployment_dir"
# Check the deployment dir exists
if [ ! -d "$test_deployment_dir" ]; then
  echo "deploy create test: deployment directory not present"
  echo "deploy create test: FAILED"
  exit 1
fi
echo "deploy create test: passed"

# Check the file written by the create command in the stack now exists
if [ ! -f "$test_deployment_dir/create-file" ]; then
  echo "deploy create test: create output file not present"
  echo "deploy create test: FAILED"
  exit 1
fi
# And has the right content
create_file_content=$(<"$test_deployment_dir/create-file")
if [ "$create_file_content" != "create-command-output-data" ]; then
  echo "deploy create test: create output file contents not correct"
  echo "deploy create test: FAILED"
  exit 1
fi
# Add a config file to be picked up by the ConfigMap before starting.
echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > "$test_deployment_dir/configmaps/test-config/test_config"

# Add secrets to the deployment spec (references a pre-existing k8s Secret by name).
# deploy init already writes an empty 'secrets: {}' key, so we replace it
# rather than appending (ruamel.yaml rejects duplicate keys).
deployment_spec_file=${test_deployment_dir}/spec.yml
sed -i 's/^secrets: {}$/secrets:\n test-secret:\n - TEST_SECRET_KEY/' "${deployment_spec_file}"

# Get the deployment ID and namespace for kubectl queries
# (second whitespace-delimited field of deployment.yml; read directly,
# no need to pipe through cat).
deployment_id=$(cut -d ' ' -f 2 < "${test_deployment_dir}/deployment.yml")
# Namespace is derived from stack name: laconic-{stack_name}
deployment_ns="laconic-test"

echo "deploy create output file test: passed"
# Try to start the deployment (--perform-cluster-management needed on first start
# because 'start' defaults to --skip-cluster-management)
"$TEST_TARGET_SO" deployment --dir "$test_deployment_dir" start --perform-cluster-management
wait_for_pods_started

# Check logs command works
wait_for_log_output
sleep 1
log_output_3=$( "$TEST_TARGET_SO" deployment --dir "$test_deployment_dir" logs )
if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then
  echo "deployment logs test: passed"
else
  echo "deployment logs test: FAILED"
  echo "$log_output_3"
  cleanup_and_exit
fi

# Check the config variable CERC_TEST_PARAM_1 was passed correctly
if [[ "$log_output_3" == *"Test-param-1: PASSED"* ]]; then
  echo "deployment config test: passed"
else
  echo "deployment config test: FAILED"
  cleanup_and_exit
fi
# Check the config variable CERC_TEST_PARAM_2 was passed correctly from the compose file
if [[ "$log_output_3" == *"Test-param-2: CERC_TEST_PARAM_2_VALUE"* ]]; then
  echo "deployment compose config test: passed"
else
  echo "deployment compose config test: FAILED"
  # Use cleanup_and_exit (not a bare exit) so a failure here still tears the
  # live deployment down, consistent with every other in-flight test failure.
  cleanup_and_exit
fi

# Check that the ConfigMap is mounted and contains the expected content.
log_output_4=$( "$TEST_TARGET_SO" deployment --dir "$test_deployment_dir" logs )
if [[ "$log_output_4" == *"/config/test_config:"* ]] && [[ "$log_output_4" == *"dbfc7a4d-44a7-416d-b5f3-29842cc47650"* ]]; then
  echo "deployment ConfigMap test: passed"
else
  echo "deployment ConfigMap test: FAILED"
  cleanup_and_exit
fi

# Check that the bind-mount volume is mounted.
log_output_5=$( "$TEST_TARGET_SO" deployment --dir "$test_deployment_dir" logs )
if [[ "$log_output_5" == *"/data: MOUNTED"* ]]; then
  echo "deployment bind volumes test: passed"
else
  echo "deployment bind volumes test: FAILED"
  echo "$log_output_5"
  cleanup_and_exit
fi

# Check that the provisioner managed volume is mounted.
log_output_6=$( "$TEST_TARGET_SO" deployment --dir "$test_deployment_dir" logs )
if [[ "$log_output_6" == *"/data2: MOUNTED"* ]]; then
  echo "deployment provisioner volumes test: passed"
else
  echo "deployment provisioner volumes test: FAILED"
  echo "$log_output_6"
  cleanup_and_exit
fi
# --- New feature tests: namespace, labels, jobs, secrets ---

# Check that the pod is in the deployment-specific namespace (not default)
ns_pod_count=$(kubectl get pods -n "${deployment_ns}" -l app="${deployment_id}" --no-headers 2>/dev/null | wc -l)
if [ "$ns_pod_count" -gt 0 ]; then
  echo "namespace isolation test: passed"
else
  echo "namespace isolation test: FAILED"
  echo "Expected pod in namespace ${deployment_ns}"
  cleanup_and_exit
fi

# Check that the stack label is set on the pod
stack_label_count=$(kubectl get pods -n "${deployment_ns}" -l app.kubernetes.io/stack=test --no-headers 2>/dev/null | wc -l)
if [ "$stack_label_count" -gt 0 ]; then
  echo "stack label test: passed"
else
  echo "stack label test: FAILED"
  cleanup_and_exit
fi

# Check that the job completed successfully (poll: up to 30 tries, 2s apart)
for i in {1..30}; do
  job_status=$(kubectl get job "${deployment_id}-job-test-job" -n "${deployment_ns}" -o jsonpath='{.status.succeeded}' 2>/dev/null || true)
  [ "$job_status" = "1" ] && break
  sleep 2
done
if [ "$job_status" = "1" ]; then
  echo "job completion test: passed"
else
  echo "job completion test: FAILED"
  echo "Job status.succeeded: ${job_status}"
  cleanup_and_exit
fi

# Check that the secrets spec results in an envFrom secretRef on the pod
secret_ref=$(kubectl get pod -n "${deployment_ns}" -l app="${deployment_id}" \
  -o jsonpath='{.items[0].spec.containers[0].envFrom[?(@.secretRef.name=="test-secret")].secretRef.name}' 2>/dev/null || true)
if [ "$secret_ref" = "test-secret" ]; then
  echo "secrets envFrom test: passed"
else
  echo "secrets envFrom test: FAILED"
  echo "Expected secretRef 'test-secret', got: ${secret_ref}"
  cleanup_and_exit
fi
# Stop with --delete-volumes (but not --delete-namespace) and verify:
# - namespace stays Active (no termination race on restart)
# - stack-labeled workloads are gone
# - bind-mount data on the host survives; provisioner volumes are recreated
"$TEST_TARGET_SO" deployment --dir "$test_deployment_dir" stop --delete-volumes --skip-cluster-management

assert_ns_phase "Active"
echo "stop preserves namespace test: passed"

for kind in deployment job ingress service configmap secret pvc pod; do
  assert_no_labeled_resources "$kind"
done
echo "stop cleans labeled resources test: passed"

# Restart — no wait needed, the namespace is still Active.
"$TEST_TARGET_SO" deployment --dir "$test_deployment_dir" start --skip-cluster-management
wait_for_pods_started
wait_for_log_output
sleep 1
# Bind-mount data lives on the host, so it must survive the stop/start cycle.
log_output_10=$( "$TEST_TARGET_SO" deployment --dir "$test_deployment_dir" logs )
if [[ "$log_output_10" == *"/data filesystem is old"* ]]; then
  echo "Retain bind volumes test: passed"
else
  echo "Retain bind volumes test: FAILED"
  cleanup_and_exit
fi

# Provisioner volumes are destroyed when PVs are deleted (--delete-volumes on stop).
# Unlike bind-mount volumes whose data persists on the host, provisioner storage
# is gone, so the volume appears fresh after restart.
log_output_11=$( "$TEST_TARGET_SO" deployment --dir "$test_deployment_dir" logs )
if [[ "$log_output_11" == *"/data2 filesystem is fresh"* ]]; then
  echo "Fresh provisioner volumes test: passed"
else
  echo "Fresh provisioner volumes test: FAILED"
  cleanup_and_exit
fi

# Full teardown: --delete-namespace nukes the namespace after labeled cleanup.
# Verify the namespace is actually gone.
"$TEST_TARGET_SO" deployment --dir "$test_deployment_dir" \
    stop --delete-volumes --delete-namespace --skip-cluster-management

# Namespace deletion is asynchronous in k8s (the namespace can linger in
# Terminating while finalizers run), so poll for disappearance instead of
# checking once — a single immediate check is a flaky-CI race.
namespace_gone=false
for i in {1..30}; do
  if ! kubectl get namespace "${deployment_ns}" >/dev/null 2>&1; then
    namespace_gone=true
    break
  fi
  sleep 2
done
if [ "$namespace_gone" != "true" ]; then
  echo "delete-namespace test: FAILED (namespace still present)"
  exit 1
fi
echo "delete-namespace test: passed"

echo "Test passed"