deploy: restart now force-recreates compose containers (#752)
Publish / Gate: k8s deploy e2e (push) Failing after 3s
Details
Deploy Test / Run deploy test suite (push) Failing after 0s
Details
Publish / Build and publish (push) Has been skipped
Details
Smoke Test / Run basic test suite (push) Failing after 0s
Details
Lint Checks / Run linter (push) Failing after 0s
Details
Webapp Test / Run webapp test suite (push) Failing after 0s
Details
Publish / Gate: k8s deploy e2e (push) Failing after 3s
Details
Deploy Test / Run deploy test suite (push) Failing after 0s
Details
Publish / Build and publish (push) Has been skipped
Details
Smoke Test / Run basic test suite (push) Failing after 0s
Details
Lint Checks / Run linter (push) Failing after 0s
Details
Webapp Test / Run webapp test suite (push) Failing after 0s
Details
Operator-reported: editing source files mounted into a service via bind volumes (alert rules, dashboards, scripts, templates, telegraf config) and running 'laconic-so deployment ... restart' did not take effect. The operator had to fall back to 'stop && start' to pick up changes. Root cause: 'restart' calls up_operation, which translates to 'docker compose up -d'. Compose's up only recreates a container when the *service definition* itself (image, env, ports, volume declarations) changes. Bind-mount target file content is not part of that hash, so the running container kept its old in-memory state (e.g. Grafana's pre-edit provisioning). Fix: add a force_recreate kwarg through the deployer interface and have restart pass force_recreate=True. The compose path threads it through to python_on_whales' compose.up(force_recreate=...). The k8s path accepts the kwarg but is a no-op for now (a rolling update on an unchanged spec needs a separate fix that stamps the kubectl.kubernetes.io/restartedAt annotation on managed Deployments; tracked in a follow-up).
main v1.1.0-2ff7e5e-202605061003
parent
cf0e230b66
commit
2ff7e5eb77
|
|
@@ -48,10 +48,21 @@ class DockerDeployer(Deployer):
|
|||
self.compose_project_name = compose_project_name
|
||||
self.compose_env_file = compose_env_file
|
||||
|
||||
def up(self, detach, skip_cluster_management, services, image_overrides=None):
|
||||
def up(
|
||||
self,
|
||||
detach,
|
||||
skip_cluster_management,
|
||||
services,
|
||||
image_overrides=None,
|
||||
force_recreate=False,
|
||||
):
|
||||
if not opts.o.dry_run:
|
||||
try:
|
||||
return self.docker.compose.up(detach=detach, services=services)
|
||||
return self.docker.compose.up(
|
||||
detach=detach,
|
||||
services=services,
|
||||
force_recreate=force_recreate,
|
||||
)
|
||||
except DockerException as e:
|
||||
raise DeployerException(e)
|
||||
|
||||
|
|
|
|||
|
|
@@ -142,6 +142,7 @@ def up_operation(
|
|||
stay_attached=False,
|
||||
skip_cluster_management=False,
|
||||
image_overrides=None,
|
||||
force_recreate=False,
|
||||
):
|
||||
global_context = ctx.parent.parent.obj
|
||||
deploy_context = ctx.obj
|
||||
|
|
@@ -161,6 +162,7 @@ def up_operation(
|
|||
skip_cluster_management=skip_cluster_management,
|
||||
services=services_list,
|
||||
image_overrides=image_overrides,
|
||||
force_recreate=force_recreate,
|
||||
)
|
||||
for post_start_command in cluster_context.post_start_commands:
|
||||
_run_command(global_context, cluster_context.cluster, post_start_command)
|
||||
|
|
|
|||
|
|
@@ -20,7 +20,14 @@ from typing import Optional
|
|||
|
||||
class Deployer(ABC):
|
||||
@abstractmethod
|
||||
def up(self, detach, skip_cluster_management, services, image_overrides=None):
|
||||
def up(
|
||||
self,
|
||||
detach,
|
||||
skip_cluster_management,
|
||||
services,
|
||||
image_overrides=None,
|
||||
force_recreate=False,
|
||||
):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
|
|
|
|||
|
|
@@ -471,12 +471,18 @@ def restart(ctx, stack_path, spec_file, config_file, force, expected_ip, image):
|
|||
ctx, deployment_context, maintenance_svc, image_overrides
|
||||
)
|
||||
else:
|
||||
# force_recreate=True so source-file edits (alert rules, dashboards,
|
||||
# entrypoint scripts, etc. mounted via bind volumes) are picked up.
|
||||
# docker compose up -d alone is a no-op when the service definition
|
||||
# itself is unchanged, leaving the running container with stale
|
||||
# in-memory state.
|
||||
up_operation(
|
||||
ctx,
|
||||
services_list=None,
|
||||
stay_attached=False,
|
||||
skip_cluster_management=True,
|
||||
image_overrides=image_overrides or None,
|
||||
force_recreate=True,
|
||||
)
|
||||
|
||||
# Restore cwd after both create_operation and up_operation have run.
|
||||
|
|
@@ -514,12 +520,15 @@ def _restart_with_maintenance(
|
|||
|
||||
# Step 1: Apply the full deployment (creates/updates all pods + services)
|
||||
# This ensures maintenance pod exists before we swap Ingress to it.
|
||||
# force_recreate intent matches the non-maintenance restart path; the
|
||||
# k8s deployer currently ignores the flag (TODO in deploy_k8s.up).
|
||||
up_operation(
|
||||
ctx,
|
||||
services_list=None,
|
||||
stay_attached=False,
|
||||
skip_cluster_management=True,
|
||||
image_overrides=image_overrides or None,
|
||||
force_recreate=True,
|
||||
)
|
||||
|
||||
# Parse maintenance service spec: "container-name:port"
|
||||
|
|
|
|||
|
|
@@ -987,7 +987,19 @@ class K8sDeployer(Deployer):
|
|||
else:
|
||||
raise
|
||||
|
||||
def up(self, detach, skip_cluster_management, services, image_overrides=None):
|
||||
def up(
|
||||
self,
|
||||
detach,
|
||||
skip_cluster_management,
|
||||
services,
|
||||
image_overrides=None,
|
||||
force_recreate=False,
|
||||
):
|
||||
# TODO: honor force_recreate by stamping the
|
||||
# kubectl.kubernetes.io/restartedAt annotation on managed
|
||||
# Deployments so a rollout occurs even when the manifest is
|
||||
# unchanged. Today this method is a no-op for that flag.
|
||||
# Tracked separately from the compose-side fix.
|
||||
# Merge spec-level image overrides with CLI overrides
|
||||
spec_overrides = self.cluster_info.spec.get("image-overrides", {})
|
||||
if spec_overrides:
|
||||
|
|
|
|||
Loading…
Reference in New Issue