# stack_orchestrator/deploy/deployment_context.py
# Copyright © 2022, 2023 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import os
from pathlib import Path
from stack_orchestrator import constants
from stack_orchestrator.util import get_yaml
from stack_orchestrator.deploy.stack import Stack
from stack_orchestrator.deploy.spec import Spec
class DeploymentContext:
    """Paths and identifiers for one on-disk deployment directory.

    Wraps a deployment directory and exposes the well-known files inside it
    (stack, spec, env, deployment, compose files) plus the two identifiers
    used for k8s/kind resources: the shared cluster id and the per-deployment
    id.
    """

    deployment_dir: Path
    # Cluster id (shared by all deployments attached to the same kind cluster).
    id: str
    # Per-deployment id (unique even when deployments share a cluster).
    deployment_id: str
    spec: Spec
    stack: Stack

    def get_stack_file(self):
        """Path to the stack file inside the deployment directory."""
        return self.deployment_dir.joinpath(constants.stack_file_name)

    def get_spec_file(self):
        """Path to the spec file inside the deployment directory."""
        return self.deployment_dir.joinpath(constants.spec_file_name)

    def get_env_file(self):
        """Path to the env/config file inside the deployment directory."""
        return self.deployment_dir.joinpath(constants.config_file_name)

    def get_deployment_file(self):
        """Path to the deployment metadata file (may not exist for legacy deployments)."""
        return self.deployment_dir.joinpath(constants.deployment_file_name)

    def get_compose_dir(self):
        """Path to the directory holding the generated compose files."""
        return self.deployment_dir.joinpath(constants.compose_dir_name)

    def get_compose_file(self, name: str):
        """Path to the compose file for pod *name* (docker-compose-{name}.yml)."""
        return self.get_compose_dir() / f"docker-compose-{name}.yml"

    def get_cluster_id(self):
        """Identifier of the kind cluster this deployment attaches to.

        Shared across deployments that join the same kind cluster. Used
        for the kube-config context name (`kind-{cluster-id}`) and for
        kind cluster lifecycle ops.
        """
        return self.id

    def get_deployment_id(self):
        """Identifier of this particular deployment's k8s resources.

        Distinct per deployment even when multiple deployments share a
        cluster. Used as compose_project_name → app_name → prefix for
        all k8s resource names (PVs, ConfigMaps, Deployments, …).

        Backward compat: for deployment.yml files written before this
        field existed, falls back to cluster-id so existing on-disk
        resource names remain stable (no PV renames, no re-bind).
        """
        return self.deployment_id

    def init(self, dir: Path):
        """Initialize this context from deployment directory *dir*.

        Loads the spec and stack files, then resolves the cluster id and
        deployment id either from the deployment file or (legacy case)
        by re-deriving the default cluster name.
        """
        self.deployment_dir = dir.absolute()
        self.spec = Spec()
        self.spec.init_from_file(self.get_spec_file())
        self.stack = Stack(self.spec.obj["stack"])
        self.stack.init_from_file(self.get_stack_file())
        deployment_file_path = self.get_deployment_file()
        if deployment_file_path.exists():
            # Use a context manager so the file handle is always closed
            # (the original open() call leaked the handle).
            with open(deployment_file_path, "r") as deployment_file:
                obj = get_yaml().load(deployment_file)
            self.id = obj[constants.cluster_id_key]
            # Fallback to cluster-id for deployments created before the
            # deployment-id field was introduced. Keeps existing resource
            # names stable across this upgrade.
            self.deployment_id = obj.get(
                constants.deployment_id_key, self.id
            )
        # Handle the case of a legacy deployment with no file
        # Code below is intended to match the output from _make_default_cluster_name()
        # TODO: remove when we no longer need to support legacy deployments
        else:
            path = os.path.realpath(os.path.abspath(self.get_compose_dir()))
            unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None"
            # md5 here is an id-derivation, not a security use.
            digest = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
            self.id = f"{constants.cluster_name_prefix}{digest}"
            self.deployment_id = self.id

    def modify_yaml(self, file_path: Path, modifier_func):
        """Load a YAML file, apply *modifier_func* to its data, and write it back.

        Raises:
            ValueError: if *file_path* is not inside the deployment directory
                (guards against accidentally rewriting unrelated files).
        """
        if not file_path.absolute().is_relative_to(self.deployment_dir):
            raise ValueError(f"File is not inside deployment directory: {file_path}")
        yaml = get_yaml()
        with open(file_path, "r") as f:
            yaml_data = yaml.load(f)
        modifier_func(yaml_data)
        with open(file_path, "w") as f:
            yaml.dump(yaml_data, f)