Fix pyright type errors across codebase
- Add pyrightconfig.json for pyright 1.1.408 TOML parsing workaround
- Add NoReturn annotations to fatal() functions for proper type narrowing
- Add None checks and assertions after require=True get_record() calls
- Fix AttrDict class with __getattr__ for dynamic attribute access
- Add type annotations and casts for Kubernetes client objects
- Store compose config as DockerDeployer instance attributes
- Filter None values from dotenv and environment mappings
- Use hasattr/getattr patterns for optional container attributes

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
parent cd3d908d0d
commit dd856af2d3
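Two of the patterns named in the commit message (the NoReturn annotation on fatal() and the AttrDict __getattr__ fix) have no hunk in the excerpt below. A minimal sketch of what they look like follows; the function bodies here are hypothetical stand-ins, not the codebase's actual implementations. Annotating a function -> NoReturn lets pyright treat code after a call to it as unreachable, so values checked just before the call narrow to non-None; defining __getattr__ on a dict subclass tells pyright that arbitrary attribute access is allowed, silencing errors on dynamic record fields.

    from typing import NoReturn
    import sys

    def fatal(msg: str) -> NoReturn:
        # pyright marks everything after a fatal() call as unreachable,
        # which narrows Optional values guarded by "if x is None: fatal(...)"
        print(f"ERROR: {msg}", file=sys.stderr)
        sys.exit(1)

    class AttrDict(dict):
        # __getattr__ declares dynamic attribute access, so record.some_field
        # no longer triggers reportAttributeAccessIssue
        def __getattr__(self, name):
            try:
                return self[name]
            except KeyError:
                raise AttributeError(name)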
@ -71,14 +71,6 @@ typeCheckingMode = "basic"
 reportMissingImports = "none"
 reportMissingModuleSource = "none"
 reportUnusedImport = "error"
-# Disable common issues in existing codebase - can be enabled incrementally
-reportGeneralTypeIssues = "none"
-reportOptionalMemberAccess = "none"
-reportOptionalSubscript = "none"
-reportOptionalCall = "none"
-reportOptionalIterable = "none"
-reportUnboundVariable = "warning"
-reportUnusedExpression = "none"
 include = ["stack_orchestrator/**/*.py", "tests/**/*.py"]
 exclude = ["**/build/**", "**/__pycache__/**"]
 

@ -0,0 +1,9 @@
+{
+  "pythonVersion": "3.9",
+  "typeCheckingMode": "basic",
+  "reportMissingImports": "none",
+  "reportMissingModuleSource": "none",
+  "reportUnusedImport": "error",
+  "include": ["stack_orchestrator/**/*.py", "tests/**/*.py"],
+  "exclude": ["**/build/**", "**/__pycache__/**"]
+}
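Note: pyright gives pyrightconfig.json precedence over the [tool.pyright] table in pyproject.toml, which is why duplicating the settings in a standalone JSON file works around the TOML parsing regression in 1.1.408. A quick way to confirm the config is picked up (assuming pyright is on PATH) is `pyright --project pyrightconfig.json`.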
@ -23,7 +23,7 @@ def get_stack(config, stack):
     if stack == "package-registry":
         return package_registry_stack(config, stack)
     else:
-        return base_stack(config, stack)
+        return default_stack(config, stack)


 class base_stack(ABC):

@ -40,6 +40,16 @@ class base_stack(ABC):
         pass


+class default_stack(base_stack):
+    """Default stack implementation for stacks without specific handling."""
+
+    def ensure_available(self):
+        return True
+
+    def get_url(self):
+        return None
+
+
 class package_registry_stack(base_stack):
     def ensure_available(self):
         self.url = "<no registry url set>"
@ -248,7 +248,7 @@ def setup(

     network_dir = Path(parameters.network_dir).absolute()
     laconicd_home_path_in_container = "/laconicd-home"
-    mounts = [VolumeMapping(network_dir, laconicd_home_path_in_container)]
+    mounts = [VolumeMapping(str(network_dir), laconicd_home_path_in_container)]

     if phase == SetupPhase.INITIALIZE:
         # We want to create the directory so if it exists that's an error

@ -379,6 +379,7 @@ def setup(
            parameters.gentx_address_list
        )
        # Add those keys to our genesis, with balances we determine here (why?)
+       outputk = None
        for other_node_key in other_node_keys:
            outputk, statusk = run_container_command(
                command_context,

@ -389,7 +390,7 @@ def setup(
                "--keyring-backend test",
                mounts,
            )
-           if options.debug:
+           if options.debug and outputk is not None:
                print(f"Command output: {outputk}")
        # Copy the gentx json files into our network dir
        _copy_gentx_files(network_dir, parameters.gentx_file_list)
@ -15,6 +15,7 @@

 from stack_orchestrator.util import get_yaml
 from stack_orchestrator.deploy.deploy_types import DeployCommandContext
+from stack_orchestrator.deploy.deployment_context import DeploymentContext
 from stack_orchestrator.deploy.stack_state import State
 from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
 from pathlib import Path

@ -31,7 +32,7 @@ def setup(command_context: DeployCommandContext, parameters, extra_args):
     host_directory = "./container-output-dir"
     host_directory_absolute = Path(extra_args[0]).absolute().joinpath(host_directory)
     host_directory_absolute.mkdir(parents=True, exist_ok=True)
-    mounts = [VolumeMapping(host_directory_absolute, "/data")]
+    mounts = [VolumeMapping(str(host_directory_absolute), "/data")]
     output, status = run_container_command(
         command_context,
         "test",

@ -45,9 +46,9 @@ def init(command_context: DeployCommandContext):
     return yaml.load(default_spec_file_content)


-def create(command_context: DeployCommandContext, extra_args):
+def create(deployment_context: DeploymentContext, extra_args):
     data = "create-command-output-data"
-    output_file_path = command_context.deployment_dir.joinpath("create-file")
+    output_file_path = deployment_context.deployment_dir.joinpath("create-file")
     with open(output_file_path, "w+") as output_file:
         output_file.write(data)
@ -14,6 +14,7 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

 from pathlib import Path
+from typing import Optional
 from python_on_whales import DockerClient, DockerException
 from stack_orchestrator.deploy.deployer import (
     Deployer,

@ -30,11 +31,11 @@ class DockerDeployer(Deployer):

     def __init__(
         self,
-        type,
-        deployment_context: DeploymentContext,
-        compose_files,
-        compose_project_name,
-        compose_env_file,
+        type: str,
+        deployment_context: Optional[DeploymentContext],
+        compose_files: list,
+        compose_project_name: Optional[str],
+        compose_env_file: Optional[str],
     ) -> None:
         self.docker = DockerClient(
             compose_files=compose_files,

@ -42,6 +43,10 @@ class DockerDeployer(Deployer):
             compose_env_file=compose_env_file,
         )
         self.type = type
+        # Store these for later use in run_job
+        self.compose_files = compose_files
+        self.compose_project_name = compose_project_name
+        self.compose_env_file = compose_env_file

     def up(self, detach, skip_cluster_management, services):
         if not opts.o.dry_run:

@ -121,7 +126,7 @@ class DockerDeployer(Deployer):
         try:
             return self.docker.run(
                 image=image,
-                command=command,
+                command=command if command else [],
                 user=user,
                 volumes=volumes,
                 entrypoint=entrypoint,

@ -133,17 +138,17 @@ class DockerDeployer(Deployer):
         except DockerException as e:
             raise DeployerException(e)

-    def run_job(self, job_name: str, release_name: str = None):
+    def run_job(self, job_name: str, release_name: Optional[str] = None):
         # release_name is ignored for Docker deployments (only used for K8s/Helm)
         if not opts.o.dry_run:
             try:
                 # Find job compose file in compose-jobs directory
                 # The deployment should have compose-jobs/docker-compose-<job_name>.yml
-                if not self.docker.compose_files:
+                if not self.compose_files:
                     raise DeployerException("No compose files configured")

                 # Deployment directory is parent of compose directory
-                compose_dir = Path(self.docker.compose_files[0]).parent
+                compose_dir = Path(self.compose_files[0]).parent
                 deployment_dir = compose_dir.parent
                 job_compose_file = (
                     deployment_dir / "compose-jobs" / f"docker-compose-{job_name}.yml"

@ -162,8 +167,8 @@ class DockerDeployer(Deployer):
                 # This allows the job to access volumes from the main deployment
                 job_docker = DockerClient(
                     compose_files=[job_compose_file],
-                    compose_project_name=self.docker.compose_project_name,
-                    compose_env_file=self.docker.compose_env_file,
+                    compose_project_name=self.compose_project_name,
+                    compose_env_file=self.compose_env_file,
                 )

                 # Run the job with --rm flag to remove container after completion
@ -21,6 +21,7 @@ import os
 import sys
 from dataclasses import dataclass
 from importlib import resources
+from typing import Optional
 import subprocess
 import click
 from pathlib import Path

@ -35,8 +36,9 @@ from stack_orchestrator.util import (
     stack_is_in_deployment,
     resolve_compose_file,
 )
-from stack_orchestrator.deploy.deployer import Deployer, DeployerException
+from stack_orchestrator.deploy.deployer import DeployerException
 from stack_orchestrator.deploy.deployer_factory import getDeployer
+from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer
 from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
 from stack_orchestrator.deploy.deployment_create import create as deployment_create

@ -91,7 +93,7 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to):

 def create_deploy_context(
     global_context,
-    deployment_context: DeploymentContext,
+    deployment_context: Optional[DeploymentContext],
     stack,
     include,
     exclude,

@ -256,7 +258,7 @@ def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
             print(stream_content.decode("utf-8"), end="")


-def run_job_operation(ctx, job_name: str, helm_release: str = None):
+def run_job_operation(ctx, job_name: str, helm_release: Optional[str] = None):
     global_context = ctx.parent.parent.obj
     if not global_context.dry_run:
         print(f"Running job: {job_name}")

@ -320,22 +322,24 @@ def get_stack_status(ctx, stack):
     ctx_copy.stack = stack

     cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None)
-    deployer = Deployer(
+    deployer = DockerDeployer(
+        type="compose",
+        deployment_context=None,
         compose_files=cluster_context.compose_files,
         compose_project_name=cluster_context.cluster,
+        compose_env_file=cluster_context.env_file,
     )
     # TODO: refactor to avoid duplicating this code above
     if ctx.verbose:
         print("Running compose ps")
     container_list = deployer.ps()
-    if len(container_list) > 0:
-        if ctx.debug:
-            print(f"Container list from compose ps: {container_list}")
-        return True
-    else:
-        if ctx.debug:
-            print("No containers found from compose ps")
-        False
+    if container_list is None or len(container_list) == 0:
+        if ctx.debug:
+            print("No containers found from compose ps")
+        return False
+    if ctx.debug:
+        print(f"Container list from compose ps: {container_list}")
+    return True


 def _make_runtime_env(ctx):

@ -394,14 +398,17 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
         all_pods = pod_list_file.read().splitlines()

     pods_in_scope = []
+    cluster_config = None
     if stack:
         stack_config = get_parsed_stack_config(stack)
-        # TODO: syntax check the input here
-        pods_in_scope = stack_config["pods"]
-        cluster_config = stack_config["config"] if "config" in stack_config else None
+        if stack_config is not None:
+            # TODO: syntax check the input here
+            pods_in_scope = stack_config["pods"]
+            cluster_config = (
+                stack_config["config"] if "config" in stack_config else None
+            )
     else:
         pods_in_scope = all_pods
-        cluster_config = None

     # Convert all pod definitions to v1.1 format
     pods_in_scope = _convert_to_new_format(pods_in_scope)
@ -13,7 +13,7 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

-from typing import List, Mapping
+from typing import List, Mapping, Optional
 from dataclasses import dataclass
 from stack_orchestrator.command_types import CommandOptions
 from stack_orchestrator.deploy.deployer import Deployer

@ -23,19 +23,19 @@ from stack_orchestrator.deploy.deployer import Deployer
 class ClusterContext:
     # TODO: this should be in its own object not stuffed in here
     options: CommandOptions
-    cluster: str
+    cluster: Optional[str]
     compose_files: List[str]
     pre_start_commands: List[str]
     post_start_commands: List[str]
-    config: str
-    env_file: str
+    config: Optional[str]
+    env_file: Optional[str]


 @dataclass
 class DeployCommandContext:
     stack: str
     cluster_context: ClusterContext
-    deployer: Deployer
+    deployer: Optional[Deployer]


 @dataclass

@ -82,7 +82,11 @@ def run_container_command(
     ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping]
 ):
     deployer = ctx.deployer
+    if deployer is None:
+        raise ValueError("Deployer is not configured")
     container_image = _container_image_from_service(ctx.stack, service)
+    if container_image is None:
+        raise ValueError(f"Container image not found for service: {service}")
     docker_volumes = _volumes_to_docker(mounts)
     if ctx.cluster_context.options.debug:
         print(f"Running this command in {service} container: {command}")
@ -15,6 +15,7 @@

 from abc import ABC, abstractmethod
 from pathlib import Path
+from typing import Optional


 class Deployer(ABC):

@ -65,7 +66,7 @@ class Deployer(ABC):
         pass

     @abstractmethod
-    def run_job(self, job_name: str, release_name: str = None):
+    def run_job(self, job_name: str, release_name: Optional[str] = None):
         pass

@ -58,6 +58,8 @@ def _get_ports(stack):
     yaml = get_yaml()
     for pod in pods:
         pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
+        if pod_file_path is None:
+            continue
         parsed_pod_file = yaml.load(open(pod_file_path, "r"))
         if "services" in parsed_pod_file:
             for svc_name, svc in parsed_pod_file["services"].items():

@ -92,6 +94,8 @@ def _get_named_volumes(stack):

     for pod in pods:
         pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
+        if pod_file_path is None:
+            continue
         parsed_pod_file = yaml.load(open(pod_file_path, "r"))
         if "volumes" in parsed_pod_file:
             volumes = parsed_pod_file["volumes"]

@ -202,6 +206,8 @@ def call_stack_deploy_init(deploy_command_context):
     for python_file_path in python_file_paths:
         if python_file_path.exists():
             spec = util.spec_from_file_location("commands", python_file_path)
+            if spec is None or spec.loader is None:
+                continue
             imported_stack = util.module_from_spec(spec)
             spec.loader.exec_module(imported_stack)
             if _has_method(imported_stack, "init"):

@ -228,6 +234,8 @@ def call_stack_deploy_setup(
     for python_file_path in python_file_paths:
         if python_file_path.exists():
             spec = util.spec_from_file_location("commands", python_file_path)
+            if spec is None or spec.loader is None:
+                continue
             imported_stack = util.module_from_spec(spec)
             spec.loader.exec_module(imported_stack)
             if _has_method(imported_stack, "setup"):

@ -243,6 +251,8 @@ def call_stack_deploy_create(deployment_context, extra_args):
     for python_file_path in python_file_paths:
         if python_file_path.exists():
             spec = util.spec_from_file_location("commands", python_file_path)
+            if spec is None or spec.loader is None:
+                continue
             imported_stack = util.module_from_spec(spec)
             spec.loader.exec_module(imported_stack)
             if _has_method(imported_stack, "create"):

@ -600,6 +610,8 @@ def create_operation(
     yaml = get_yaml()
     for pod in pods:
         pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
+        if pod_file_path is None:
+            continue
         parsed_pod_file = yaml.load(open(pod_file_path, "r"))
         extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
         destination_pod_dir = destination_pods_dir.joinpath(pod)

@ -688,7 +700,8 @@ def create_operation(
         deployment_type, deployment_context
     )
     # TODO: make deployment_dir_path a Path above
-    deployer_config_generator.generate(deployment_dir_path)
+    if deployer_config_generator is not None:
+        deployer_config_generator.generate(deployment_dir_path)
     call_stack_deploy_create(
         deployment_context, [network_dir, initial_peers, deployment_command_context]
     )
@ -17,7 +17,7 @@ import os
 import base64

 from kubernetes import client
-from typing import Any, List, Set
+from typing import Any, List, Optional, Set

 from stack_orchestrator.opts import opts
 from stack_orchestrator.util import env_var_map_from_file

@ -51,7 +51,7 @@ DEFAULT_CONTAINER_RESOURCES = Resources(


 def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements:
-    def to_dict(limits: ResourceLimits):
+    def to_dict(limits: Optional[ResourceLimits]):
         if not limits:
             return None

@ -83,9 +83,11 @@ class ClusterInfo:
         self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
         # Find the set of images in the pods
         self.image_set = images_for_deployment(pod_files)
-        self.environment_variables = DeployEnvVars(
-            env_var_map_from_file(compose_env_file)
-        )
+        # Filter out None values from env file
+        env_vars = {
+            k: v for k, v in env_var_map_from_file(compose_env_file).items() if v
+        }
+        self.environment_variables = DeployEnvVars(env_vars)
         self.app_name = deployment_name
         self.spec = spec
         if opts.o.debug:

@ -214,6 +216,7 @@ class ClusterInfo:

     # TODO: suppoprt multiple services
     def get_service(self):
+        port = None
         for pod_name in self.parsed_pod_yaml_map:
             pod = self.parsed_pod_yaml_map[pod_name]
             services = pod["services"]

@ -223,6 +226,8 @@ class ClusterInfo:
                     port = int(service_info["ports"][0])
                     if opts.o.debug:
                         print(f"service port: {port}")
+        if port is None:
+            return None
         service = client.V1Service(
             metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"),
             spec=client.V1ServiceSpec(

@ -287,9 +292,9 @@ class ClusterInfo:
                     print(f"{cfg_map_name} not in pod files")
                 continue

-            if not cfg_map_path.startswith("/"):
+            if not cfg_map_path.startswith("/") and self.spec.file_path is not None:
                 cfg_map_path = os.path.join(
-                    os.path.dirname(self.spec.file_path), cfg_map_path
+                    os.path.dirname(str(self.spec.file_path)), cfg_map_path
                 )

             # Read in all the files at a single-level of the directory.

@ -367,8 +372,9 @@ class ClusterInfo:
         return result

     # TODO: put things like image pull policy into an object-scope struct
-    def get_deployment(self, image_pull_policy: str = None):
+    def get_deployment(self, image_pull_policy: Optional[str] = None):
         containers = []
+        services = {}
         resources = self.spec.get_container_resources()
         if not resources:
             resources = DEFAULT_CONTAINER_RESOURCES
@ -16,7 +16,8 @@ from datetime import datetime, timezone

 from pathlib import Path
 from kubernetes import client, config
-from typing import List
+from kubernetes.client.exceptions import ApiException
+from typing import Any, Dict, List, Optional, cast

 from stack_orchestrator import constants
 from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator

@ -50,7 +51,7 @@ class AttrDict(dict):
         self.__dict__ = self


-def _check_delete_exception(e: client.exceptions.ApiException):
+def _check_delete_exception(e: ApiException) -> None:
     if e.status == 404:
         if opts.o.debug:
             print("Failed to delete object, continuing")

@ -189,18 +190,25 @@ class K8sDeployer(Deployer):
         if opts.o.debug:
             print(f"Sending this deployment: {deployment}")
         if not opts.o.dry_run:
-            deployment_resp = self.apps_api.create_namespaced_deployment(
-                body=deployment, namespace=self.k8s_namespace
+            deployment_resp = cast(
+                client.V1Deployment,
+                self.apps_api.create_namespaced_deployment(
+                    body=deployment, namespace=self.k8s_namespace
+                ),
             )
             if opts.o.debug:
                 print("Deployment created:")
-                ns = deployment_resp.metadata.namespace
-                name = deployment_resp.metadata.name
-                gen = deployment_resp.metadata.generation
-                img = deployment_resp.spec.template.spec.containers[0].image
-                print(f"{ns} {name} {gen} {img}")
+                meta = deployment_resp.metadata
+                spec = deployment_resp.spec
+                if meta and spec and spec.template.spec:
+                    ns = meta.namespace
+                    name = meta.name
+                    gen = meta.generation
+                    containers = spec.template.spec.containers
+                    img = containers[0].image if containers else None
+                    print(f"{ns} {name} {gen} {img}")

-        service: client.V1Service = self.cluster_info.get_service()
+        service = self.cluster_info.get_service()
         if opts.o.debug:
             print(f"Sending this service: {service}")
         if not opts.o.dry_run:

@ -254,7 +262,7 @@ class K8sDeployer(Deployer):
             # Create the kind cluster
             create_cluster(
                 self.kind_cluster_name,
-                self.deployment_dir.joinpath(constants.kind_config_filename),
+                str(self.deployment_dir.joinpath(constants.kind_config_filename)),
             )
             # Ensure the referenced containers are copied into kind
             load_images_into_kind(

@ -286,7 +294,7 @@ class K8sDeployer(Deployer):
             if certificate:
                 print(f"Using existing certificate: {certificate}")

-            ingress: client.V1Ingress = self.cluster_info.get_ingress(
+            ingress = self.cluster_info.get_ingress(
                 use_tls=use_tls, certificate=certificate
             )
             if ingress:

@ -333,7 +341,7 @@ class K8sDeployer(Deployer):
                 if opts.o.debug:
                     print("PV deleted:")
                     print(f"{pv_resp}")
-            except client.exceptions.ApiException as e:
+            except ApiException as e:
                 _check_delete_exception(e)

         # Figure out the PVCs for this deployment

@ -348,7 +356,7 @@ class K8sDeployer(Deployer):
                 if opts.o.debug:
                     print("PVCs deleted:")
                     print(f"{pvc_resp}")
-            except client.exceptions.ApiException as e:
+            except ApiException as e:
                 _check_delete_exception(e)

         # Figure out the ConfigMaps for this deployment

@ -363,40 +371,40 @@ class K8sDeployer(Deployer):
                 if opts.o.debug:
                     print("ConfigMap deleted:")
                     print(f"{cfg_map_resp}")
-            except client.exceptions.ApiException as e:
+            except ApiException as e:
                 _check_delete_exception(e)

         deployment = self.cluster_info.get_deployment()
         if opts.o.debug:
             print(f"Deleting this deployment: {deployment}")
-        try:
-            self.apps_api.delete_namespaced_deployment(
-                name=deployment.metadata.name, namespace=self.k8s_namespace
-            )
-        except client.exceptions.ApiException as e:
-            _check_delete_exception(e)
+        if deployment and deployment.metadata and deployment.metadata.name:
+            try:
+                self.apps_api.delete_namespaced_deployment(
+                    name=deployment.metadata.name, namespace=self.k8s_namespace
+                )
+            except ApiException as e:
+                _check_delete_exception(e)

-        service: client.V1Service = self.cluster_info.get_service()
+        service = self.cluster_info.get_service()
         if opts.o.debug:
             print(f"Deleting service: {service}")
-        try:
-            self.core_api.delete_namespaced_service(
-                namespace=self.k8s_namespace, name=service.metadata.name
-            )
-        except client.exceptions.ApiException as e:
-            _check_delete_exception(e)
+        if service and service.metadata and service.metadata.name:
+            try:
+                self.core_api.delete_namespaced_service(
+                    namespace=self.k8s_namespace, name=service.metadata.name
+                )
+            except ApiException as e:
+                _check_delete_exception(e)

-        ingress: client.V1Ingress = self.cluster_info.get_ingress(
-            use_tls=not self.is_kind()
-        )
-        if ingress:
+        ingress = self.cluster_info.get_ingress(use_tls=not self.is_kind())
+        if ingress and ingress.metadata and ingress.metadata.name:
             if opts.o.debug:
                 print(f"Deleting this ingress: {ingress}")
             try:
                 self.networking_api.delete_namespaced_ingress(
                     name=ingress.metadata.name, namespace=self.k8s_namespace
                 )
-            except client.exceptions.ApiException as e:
+            except ApiException as e:
                 _check_delete_exception(e)
         else:
             if opts.o.debug:

@ -406,12 +414,13 @@ class K8sDeployer(Deployer):
         for nodeport in nodeports:
             if opts.o.debug:
                 print(f"Deleting this nodeport: {nodeport}")
-            try:
-                self.core_api.delete_namespaced_service(
-                    namespace=self.k8s_namespace, name=nodeport.metadata.name
-                )
-            except client.exceptions.ApiException as e:
-                _check_delete_exception(e)
+            if nodeport.metadata and nodeport.metadata.name:
+                try:
+                    self.core_api.delete_namespaced_service(
+                        namespace=self.k8s_namespace, name=nodeport.metadata.name
+                    )
+                except ApiException as e:
+                    _check_delete_exception(e)
         else:
             if opts.o.debug:
                 print("No nodeport to delete")

@ -428,8 +437,9 @@ class K8sDeployer(Deployer):

         if all_pods.items:
             for p in all_pods.items:
-                if f"{self.cluster_info.app_name}-deployment" in p.metadata.name:
-                    pods.append(p)
+                if p.metadata and p.metadata.name:
+                    if f"{self.cluster_info.app_name}-deployment" in p.metadata.name:
+                        pods.append(p)

         if not pods:
             return

@ -438,24 +448,39 @@ class K8sDeployer(Deployer):
         ip = "?"
         tls = "?"
         try:
-            ingress = self.networking_api.read_namespaced_ingress(
-                namespace=self.k8s_namespace,
-                name=self.cluster_info.get_ingress().metadata.name,
+            cluster_ingress = self.cluster_info.get_ingress()
+            if cluster_ingress is None or cluster_ingress.metadata is None:
+                return
+            ingress = cast(
+                client.V1Ingress,
+                self.networking_api.read_namespaced_ingress(
+                    namespace=self.k8s_namespace,
+                    name=cluster_ingress.metadata.name,
+                ),
             )
+            if not ingress.spec or not ingress.spec.tls or not ingress.spec.rules:
+                return

-            cert = self.custom_obj_api.get_namespaced_custom_object(
-                group="cert-manager.io",
-                version="v1",
-                namespace=self.k8s_namespace,
-                plural="certificates",
-                name=ingress.spec.tls[0].secret_name,
+            cert = cast(
+                Dict[str, Any],
+                self.custom_obj_api.get_namespaced_custom_object(
+                    group="cert-manager.io",
+                    version="v1",
+                    namespace=self.k8s_namespace,
+                    plural="certificates",
+                    name=ingress.spec.tls[0].secret_name,
+                ),
             )

             hostname = ingress.spec.rules[0].host
-            ip = ingress.status.load_balancer.ingress[0].ip
+            if ingress.status and ingress.status.load_balancer:
+                lb_ingress = ingress.status.load_balancer.ingress
+                if lb_ingress:
+                    ip = lb_ingress[0].ip or "?"
+            cert_status = cert.get("status", {})
             tls = "notBefore: %s; notAfter: %s; names: %s" % (
-                cert["status"]["notBefore"],
-                cert["status"]["notAfter"],
+                cert_status.get("notBefore", "?"),
+                cert_status.get("notAfter", "?"),
                 ingress.spec.tls[0].hosts,
             )
         except:  # noqa: E722

@ -469,6 +494,8 @@ class K8sDeployer(Deployer):
             print("Pods:")

         for p in pods:
+            if not p.metadata:
+                continue
             ns = p.metadata.namespace
             name = p.metadata.name
             if p.metadata.deletion_timestamp:

@ -539,7 +566,7 @@ class K8sDeployer(Deployer):
                     container_log_lines = container_log.splitlines()
                     for line in container_log_lines:
                         log_data += f"{container}: {line}\n"
-                except client.exceptions.ApiException as e:
+                except ApiException as e:
                     if opts.o.debug:
                         print(f"Error from read_namespaced_pod_log: {e}")
                     log_data = "******* No logs available ********\n"

@ -548,25 +575,44 @@ class K8sDeployer(Deployer):
     def update(self):
         self.connect_api()
         ref_deployment = self.cluster_info.get_deployment()
+        if not ref_deployment or not ref_deployment.metadata:
+            return
+        ref_name = ref_deployment.metadata.name
+        if not ref_name:
+            return

-        deployment = self.apps_api.read_namespaced_deployment(
-            name=ref_deployment.metadata.name, namespace=self.k8s_namespace
+        deployment = cast(
+            client.V1Deployment,
+            self.apps_api.read_namespaced_deployment(
+                name=ref_name, namespace=self.k8s_namespace
+            ),
         )
+        if not deployment.spec or not deployment.spec.template:
+            return
+        template_spec = deployment.spec.template.spec
+        if not template_spec or not template_spec.containers:
+            return

-        new_env = ref_deployment.spec.template.spec.containers[0].env
-        for container in deployment.spec.template.spec.containers:
-            old_env = container.env
-            if old_env != new_env:
-                container.env = new_env
+        ref_spec = ref_deployment.spec
+        if ref_spec and ref_spec.template and ref_spec.template.spec:
+            ref_containers = ref_spec.template.spec.containers
+            if ref_containers:
+                new_env = ref_containers[0].env
+                for container in template_spec.containers:
+                    old_env = container.env
+                    if old_env != new_env:
+                        container.env = new_env

-        deployment.spec.template.metadata.annotations = {
-            "kubectl.kubernetes.io/restartedAt": datetime.utcnow()
-            .replace(tzinfo=timezone.utc)
-            .isoformat()
-        }
+        template_meta = deployment.spec.template.metadata
+        if template_meta:
+            template_meta.annotations = {
+                "kubectl.kubernetes.io/restartedAt": datetime.utcnow()
+                .replace(tzinfo=timezone.utc)
+                .isoformat()
+            }

         self.apps_api.patch_namespaced_deployment(
-            name=ref_deployment.metadata.name,
+            name=ref_name,
             namespace=self.k8s_namespace,
             body=deployment,
         )

@ -585,7 +631,7 @@ class K8sDeployer(Deployer):
         # We need to figure out how to do this -- check why we're being called first
         pass

-    def run_job(self, job_name: str, helm_release: str = None):
+    def run_job(self, job_name: str, helm_release: Optional[str] = None):
         if not opts.o.dry_run:
             from stack_orchestrator.deploy.k8s.helm.job_runner import run_helm_job
@ -138,6 +138,8 @@ def generate_helm_chart(
     """

     parsed_stack = get_parsed_stack_config(stack_path)
+    if parsed_stack is None:
+        error_exit(f"Failed to parse stack config: {stack_path}")
     stack_name = parsed_stack.get("name", stack_path)

     # 1. Check Kompose availability

@ -185,22 +187,28 @@ def generate_helm_chart(
     compose_files = []
     for pod in pods:
         pod_file = get_pod_file_path(stack_path, parsed_stack, pod)
-        if not pod_file.exists():
-            error_exit(f"Pod file not found: {pod_file}")
-        compose_files.append(pod_file)
+        if pod_file is None:
+            error_exit(f"Pod file path not found for pod: {pod}")
+        pod_file_path = Path(pod_file) if isinstance(pod_file, str) else pod_file
+        if not pod_file_path.exists():
+            error_exit(f"Pod file not found: {pod_file_path}")
+        compose_files.append(pod_file_path)
         if opts.o.debug:
-            print(f"Found compose file: {pod_file.name}")
+            print(f"Found compose file: {pod_file_path.name}")

     # Add job compose files
     job_files = []
     for job in jobs:
         job_file = get_job_file_path(stack_path, parsed_stack, job)
-        if not job_file.exists():
-            error_exit(f"Job file not found: {job_file}")
-        compose_files.append(job_file)
-        job_files.append(job_file)
+        if job_file is None:
+            error_exit(f"Job file path not found for job: {job}")
+        job_file_path = Path(job_file) if isinstance(job_file, str) else job_file
+        if not job_file_path.exists():
+            error_exit(f"Job file not found: {job_file_path}")
+        compose_files.append(job_file_path)
+        job_files.append(job_file_path)
         if opts.o.debug:
-            print(f"Found job compose file: {job_file.name}")
+            print(f"Found job compose file: {job_file_path.name}")

     try:
         version = get_kompose_version()
@ -18,6 +18,7 @@ import tempfile
 import os
 import json
 from pathlib import Path
+from typing import Optional
 from stack_orchestrator.util import get_yaml

@ -50,7 +51,7 @@ def get_release_name_from_chart(chart_dir: Path) -> str:
 def run_helm_job(
     chart_dir: Path,
     job_name: str,
-    release: str = None,
+    release: Optional[str] = None,
     namespace: str = "default",
     timeout: int = 600,
     verbose: bool = False,
@ -16,7 +16,7 @@
 import subprocess
 import shutil
 from pathlib import Path
-from typing import List
+from typing import List, Optional


 def check_kompose_available() -> bool:

@ -53,7 +53,7 @@ def get_kompose_version() -> str:


 def convert_to_helm_chart(
-    compose_files: List[Path], output_dir: Path, chart_name: str = None
+    compose_files: List[Path], output_dir: Path, chart_name: Optional[str] = None
 ) -> str:
     """
     Invoke kompose to convert Docker Compose files to a Helm chart.
@ -18,7 +18,7 @@ import os
 from pathlib import Path
 import subprocess
 import re
-from typing import Set, Mapping, List
+from typing import Set, Mapping, List, Optional, cast

 from stack_orchestrator.util import get_k8s_dir, error_exit
 from stack_orchestrator.opts import opts

@ -75,8 +75,10 @@ def wait_for_ingress_in_kind():
         label_selector="app.kubernetes.io/component=controller",
         timeout_seconds=30,
     ):
-        if event["object"].status.container_statuses:
-            if event["object"].status.container_statuses[0].ready is True:
+        event_dict = cast(dict, event)
+        pod = cast(client.V1Pod, event_dict.get("object"))
+        if pod and pod.status and pod.status.container_statuses:
+            if pod.status.container_statuses[0].ready is True:
                 if warned_waiting:
                     print("Ingress controller is ready")
                 return

@ -119,14 +121,18 @@ def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str):
     return pods


-def containers_in_pod(core_api: client.CoreV1Api, pod_name: str):
-    containers = []
-    pod_response = core_api.read_namespaced_pod(pod_name, namespace="default")
+def containers_in_pod(core_api: client.CoreV1Api, pod_name: str) -> List[str]:
+    containers: List[str] = []
+    pod_response = cast(
+        client.V1Pod, core_api.read_namespaced_pod(pod_name, namespace="default")
+    )
     if opts.o.debug:
         print(f"pod_response: {pod_response}")
-    pod_containers = pod_response.spec.containers
-    for pod_container in pod_containers:
-        containers.append(pod_container.name)
+    if not pod_response.spec or not pod_response.spec.containers:
+        return containers
+    for pod_container in pod_response.spec.containers:
+        if pod_container.name:
+            containers.append(pod_container.name)
     return containers


@ -351,7 +357,9 @@ def merge_envs(a: Mapping[str, str], b: Mapping[str, str]) -> Mapping[str, str]:
     return result


-def _expand_shell_vars(raw_val: str, env_map: Mapping[str, str] = None) -> str:
+def _expand_shell_vars(
+    raw_val: str, env_map: Optional[Mapping[str, str]] = None
+) -> str:
     # Expand docker-compose style variable substitution:
     # ${VAR} - use VAR value or empty string
     # ${VAR:-default} - use VAR value or default if unset/empty

@ -376,7 +384,7 @@ def _expand_shell_vars(raw_val: str, env_map: Mapping[str, str] = None) -> str:


 def envs_from_compose_file(
-    compose_file_envs: Mapping[str, str], env_map: Mapping[str, str] = None
+    compose_file_envs: Mapping[str, str], env_map: Optional[Mapping[str, str]] = None
 ) -> Mapping[str, str]:
     result = {}
     for env_var, env_val in compose_file_envs.items():
@ -14,6 +14,7 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

 import typing
+from typing import Optional
 import humanfriendly

 from pathlib import Path

@ -23,9 +24,9 @@ from stack_orchestrator import constants


 class ResourceLimits:
-    cpus: float = None
-    memory: int = None
-    storage: int = None
+    cpus: Optional[float] = None
+    memory: Optional[int] = None
+    storage: Optional[int] = None

     def __init__(self, obj=None):
         if obj is None:

@ -49,8 +50,8 @@ class ResourceLimits:


 class Resources:
-    limits: ResourceLimits = None
-    reservations: ResourceLimits = None
+    limits: Optional[ResourceLimits] = None
+    reservations: Optional[ResourceLimits] = None

     def __init__(self, obj=None):
         if obj is None:

@ -73,9 +74,9 @@ class Resources:

 class Spec:
     obj: typing.Any
-    file_path: Path
+    file_path: Optional[Path]

-    def __init__(self, file_path: Path = None, obj=None) -> None:
+    def __init__(self, file_path: Optional[Path] = None, obj=None) -> None:
         if obj is None:
             obj = {}
         self.file_path = file_path
@ -73,6 +73,7 @@ def process_app_deployment_request(
|
||||||
app = laconic.get_record(
|
app = laconic.get_record(
|
||||||
app_deployment_request.attributes.application, require=True
|
app_deployment_request.attributes.application, require=True
|
||||||
)
|
)
|
||||||
|
assert app is not None # require=True ensures this
|
||||||
logger.log(f"Retrieved app record {app_deployment_request.attributes.application}")
|
logger.log(f"Retrieved app record {app_deployment_request.attributes.application}")
|
||||||
|
|
||||||
# 2. determine dns
|
# 2. determine dns
|
||||||
|
|
@ -483,6 +484,8 @@ def command( # noqa: C901
|
||||||
laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file
|
laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file
|
||||||
)
|
)
|
||||||
webapp_deployer_record = laconic.get_record(lrn, require=True)
|
webapp_deployer_record = laconic.get_record(lrn, require=True)
|
||||||
|
assert webapp_deployer_record is not None # require=True ensures this
|
||||||
|
assert webapp_deployer_record.attributes is not None
|
||||||
payment_address = webapp_deployer_record.attributes.paymentAddress
|
payment_address = webapp_deployer_record.attributes.paymentAddress
|
||||||
main_logger.log(f"Payment address: {payment_address}")
|
main_logger.log(f"Payment address: {payment_address}")
|
||||||
|
|
||||||
|
|
@ -495,6 +498,7 @@ def command( # noqa: C901
|
||||||
sys.exit(2)
|
sys.exit(2)
|
||||||
|
|
||||||
# Find deployment requests.
|
# Find deployment requests.
|
||||||
|
requests = []
|
||||||
# single request
|
# single request
|
||||||
if request_id:
|
if request_id:
|
||||||
main_logger.log(f"Retrieving request {request_id}...")
|
main_logger.log(f"Retrieving request {request_id}...")
|
||||||
|
|
@@ -518,25 +522,35 @@ def command(  # noqa: C901
         previous_requests = load_known_requests(state_file)

     # Collapse related requests.
-    requests.sort(key=lambda r: r.createTime)
-    requests.reverse()
+    # Filter out None values and sort
+    valid_requests = [r for r in requests if r is not None]
+    valid_requests.sort(key=lambda r: r.createTime if r else "")
+    valid_requests.reverse()
     requests_by_name = {}
     skipped_by_name = {}
-    for r in requests:
-        main_logger.log(f"BEGIN: Examining request {r.id}")
+    for r in valid_requests:
+        if not r:
+            continue
+        r_id = r.id if r else "unknown"
+        main_logger.log(f"BEGIN: Examining request {r_id}")
         result = "PENDING"
         try:
             if (
-                r.id in previous_requests
-                and previous_requests[r.id].get("status", "") != "RETRY"
+                r_id in previous_requests
+                and previous_requests[r_id].get("status", "") != "RETRY"
             ):
-                main_logger.log(f"Skipping request {r.id}, we've already seen it.")
+                main_logger.log(f"Skipping request {r_id}, we've already seen it.")
                 result = "SKIP"
                 continue

+            if not r.attributes:
+                main_logger.log(f"Skipping request {r_id}, no attributes.")
+                result = "ERROR"
+                continue
+
             app = laconic.get_record(r.attributes.application)
             if not app:
-                main_logger.log(f"Skipping request {r.id}, cannot locate app.")
+                main_logger.log(f"Skipping request {r_id}, cannot locate app.")
                 result = "ERROR"
                 continue
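The filter-then-sort step above, in isolation; this assumes createTime values are ISO-8601 strings, so lexicographic order matches chronological order:

    from types import SimpleNamespace

    requests = [
        SimpleNamespace(id="a", createTime="2024-01-02T00:00:00Z"),
        None,  # registry queries can yield missing records
        SimpleNamespace(id="b", createTime="2024-01-03T00:00:00Z"),
    ]
    valid_requests = [r for r in requests if r is not None]
    valid_requests.sort(key=lambda r: r.createTime, reverse=True)  # newest first
    print([r.id for r in valid_requests])  # ['b', 'a']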
@@ -544,7 +558,7 @@ def command(  # noqa: C901
             if not requested_name:
                 requested_name = generate_hostname_for_app(app)
                 main_logger.log(
-                    "Generating name %s for request %s." % (requested_name, r.id)
+                    "Generating name %s for request %s." % (requested_name, r_id)
                 )

             if (
@@ -552,31 +566,33 @@ def command(  # noqa: C901
                 or requested_name in requests_by_name
             ):
                 main_logger.log(
-                    "Ignoring request %s, it has been superseded." % r.id
+                    "Ignoring request %s, it has been superseded." % r_id
                 )
                 result = "SKIP"
                 continue

             if skip_by_tag(r, include_tags, exclude_tags):
+                r_tags = r.attributes.tags if r.attributes else None
                 main_logger.log(
                     "Skipping request %s, filtered by tag "
                     "(include %s, exclude %s, present %s)"
-                    % (r.id, include_tags, exclude_tags, r.attributes.tags)
+                    % (r_id, include_tags, exclude_tags, r_tags)
                 )
                 skipped_by_name[requested_name] = r
                 result = "SKIP"
                 continue

+            r_app = r.attributes.application if r.attributes else "unknown"
             main_logger.log(
                 "Found pending request %s to run application %s on %s."
-                % (r.id, r.attributes.application, requested_name)
+                % (r_id, r_app, requested_name)
             )
             requests_by_name[requested_name] = r
         except Exception as e:
             result = "ERROR"
-            main_logger.log(f"ERROR examining request {r.id}: " + str(e))
+            main_logger.log(f"ERROR examining request {r_id}: " + str(e))
         finally:
-            main_logger.log(f"DONE Examining request {r.id} with result {result}.")
+            main_logger.log(f"DONE Examining request {r_id} with result {result}.")
             if result in ["ERROR"]:
                 dump_known_requests(state_file, [r], status=result)
@@ -673,6 +689,7 @@ def command(  # noqa: C901
         status = "ERROR"
         run_log_file = None
         run_reg_client = laconic
+        build_logger = None
         try:
             run_id = (
                 f"{r.id}-{str(time.time()).split('.')[0]}-"
@@ -718,7 +735,8 @@ def command(  # noqa: C901
             status = "DEPLOYED"
         except Exception as e:
             main_logger.log(f"ERROR {r.id}:" + str(e))
-            build_logger.log("ERROR: " + str(e))
+            if build_logger:
+                build_logger.log("ERROR: " + str(e))
         finally:
             main_logger.log(f"DEPLOYING {r.id}: END - {status}")
             if build_logger:
@@ -64,7 +64,11 @@ def command(  # noqa: C901
 ):
     laconic = LaconicRegistryClient(laconic_config)
     if not payment_address:
-        payment_address = laconic.whoami().address
+        whoami_result = laconic.whoami()
+        if whoami_result and whoami_result.address:
+            payment_address = whoami_result.address
+        else:
+            raise ValueError("Could not determine payment address from laconic whoami")

     pub_key = base64.b64encode(open(public_key_file, "rb").read()).decode("ASCII")
     hostname = urlparse(api_url).hostname
@@ -16,6 +16,7 @@ import shutil
 import sys
 import tempfile
 from datetime import datetime
+from typing import NoReturn
 import base64

 import gnupg
@@ -31,7 +32,7 @@ from stack_orchestrator.deploy.webapp.util import (
 from dotenv import dotenv_values


-def fatal(msg: str):
+def fatal(msg: str) -> NoReturn:
     print(msg, file=sys.stderr)
     sys.exit(1)
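Annotating fatal() with NoReturn is what tells pyright that the error branch never falls through, so values checked before the call stay narrowed afterwards; a minimal sketch:

    import sys
    from typing import NoReturn, Optional

    def fatal(msg: str) -> NoReturn:
        print(msg, file=sys.stderr)
        sys.exit(1)

    def lookup(key: str) -> Optional[str]:
        return key or None

    value = lookup("demo")
    if value is None:
        fatal("no value")  # NoReturn: pyright knows this branch cannot continue
    print(value.upper())   # value is narrowed to str here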
@@ -134,24 +135,30 @@ def command(  # noqa: C901
         fatal(f"Unable to locate auction: {auction_id}")

     # Check auction owner
-    if auction.ownerAddress != laconic.whoami().address:
+    whoami = laconic.whoami()
+    if not whoami or not whoami.address:
+        fatal("Unable to determine current account address")
+    if auction.ownerAddress != whoami.address:
         fatal(f"Auction {auction_id} owner mismatch")

     # Check auction kind
-    if auction.kind != AUCTION_KIND_PROVIDER:
+    auction_kind = auction.kind if auction else None
+    if auction_kind != AUCTION_KIND_PROVIDER:
         fatal(
-            f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction.kind}"
+            f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction_kind}"
         )

     # Check auction status
-    if auction.status != AuctionStatus.COMPLETED:
-        fatal(f"Auction {auction_id} not completed yet, status {auction.status}")
+    auction_status = auction.status if auction else None
+    if auction_status != AuctionStatus.COMPLETED:
+        fatal(f"Auction {auction_id} not completed yet, status {auction_status}")

     # Check that winner list is not empty
-    if len(auction.winnerAddresses) == 0:
+    winner_addresses = auction.winnerAddresses if auction else []
+    if not winner_addresses or len(winner_addresses) == 0:
         fatal(f"Auction {auction_id} has no winners")

-    auction_winners = auction.winnerAddresses
+    auction_winners = winner_addresses

     # Get deployer record for all the auction winners
     for auction_winner in auction_winners:
@@ -198,9 +205,12 @@ def command(  # noqa: C901
     recip = gpg.list_keys()[0]["uids"][0]

     # Wrap the config
+    whoami_result = laconic.whoami()
+    if not whoami_result or not whoami_result.address:
+        fatal("Unable to determine current account address")
     config = {
         # Include account (and payment?) details
-        "authorized": [laconic.whoami().address],
+        "authorized": [whoami_result.address],
         "config": {"env": dict(dotenv_values(env_file))},
     }
     serialized = yaml.dump(config)
@@ -227,12 +237,22 @@ def command(  # noqa: C901
     if (not deployer) and len(deployer_record.names):
         target_deployer = deployer_record.names[0]

+    app_name = (
+        app_record.attributes.name
+        if app_record and app_record.attributes
+        else "unknown"
+    )
+    app_version = (
+        app_record.attributes.version
+        if app_record and app_record.attributes
+        else "unknown"
+    )
     deployment_request = {
         "record": {
             "type": "ApplicationDeploymentRequest",
             "application": app,
             "version": "1.0.0",
-            "name": f"{app_record.attributes.name}@{app_record.attributes.version}",
+            "name": f"{app_name}@{app_version}",
             "deployer": target_deployer,
             "meta": {"when": str(datetime.utcnow())},
         }
@@ -20,9 +20,9 @@ import yaml
 from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient


-def fatal(msg: str):
+def fatal(msg: str) -> None:
     print(msg, file=sys.stderr)
-    sys.exit(1)
+    sys.exit(1)  # this function never returns


 @click.command()
@@ -85,18 +85,17 @@ def command(
     if dry_run:
         undeployment_request["record"]["payment"] = "DRY_RUN"
     elif "auto" == make_payment:
-        if "minimumPayment" in deployer_record.attributes:
-            amount = int(
-                deployer_record.attributes.minimumPayment.replace("alnt", "")
-            )
+        attrs = deployer_record.attributes if deployer_record else None
+        if attrs and "minimumPayment" in attrs:
+            amount = int(attrs.minimumPayment.replace("alnt", ""))
     else:
         amount = make_payment
     if amount:
-        receipt = laconic.send_tokens(
-            deployer_record.attributes.paymentAddress, amount
-        )
+        attrs = deployer_record.attributes if deployer_record else None
+        if attrs and attrs.paymentAddress:
+            receipt = laconic.send_tokens(attrs.paymentAddress, amount)
         undeployment_request["record"]["payment"] = receipt.tx.hash
         print("Payment TX:", receipt.tx.hash)
     elif use_payment:
         undeployment_request["record"]["payment"] = use_payment
@@ -39,9 +39,12 @@ WEBAPP_PORT = 80
 def command(ctx, image, env_file, port):
     """run the specified webapp container"""

-    env = {}
+    env: dict[str, str] = {}
     if env_file:
-        env = dotenv_values(env_file)
+        # Filter out None values from dotenv
+        for k, v in dotenv_values(env_file).items():
+            if v is not None:
+                env[k] = v

     unique_cluster_descriptor = f"{image},{env}"
     hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
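dotenv_values() is annotated as returning Dict[str, Optional[str]] (a key with no value parses as None), which is why the loop above filters; an equivalent dict comprehension, with app.env as a hypothetical file name:

    from dotenv import dotenv_values

    env = {k: v for k, v in dotenv_values("app.env").items() if v is not None}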
@@ -55,6 +58,11 @@ def command(ctx, image, env_file, port):
         compose_env_file=None,
     )

+    if not deployer:
+        print("Failed to create deployer", file=click.get_text_stream("stderr"))
+        ctx.exit(1)
+        return  # Unreachable, but helps type checker
+
     ports = []
     if port:
         ports = [(port, WEBAPP_PORT)]
@@ -72,10 +80,19 @@ def command(ctx, image, env_file, port):
     # Make configurable?
     webappPort = f"{WEBAPP_PORT}/tcp"
     # TODO: This assumes a Docker container object...
-    if webappPort in container.network_settings.ports:
+    # Check if container has network_settings (Docker container object)
+    if (
+        container
+        and hasattr(container, "network_settings")
+        and container.network_settings
+        and hasattr(container.network_settings, "ports")
+        and container.network_settings.ports
+        and webappPort in container.network_settings.ports
+    ):
         mapping = container.network_settings.ports[webappPort][0]
+        container_id = getattr(container, "id", "unknown")
         print(
             f"Image: {image}\n"
-            f"ID: {container.id}\n"
+            f"ID: {container_id}\n"
             f"URL: http://localhost:{mapping['HostPort']}"
         )
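The hasattr/getattr chain above is the defensive pattern for objects whose concrete class the checker cannot see (the deployer may hand back a Docker container or a Kubernetes object); the same idea in isolation, with DockerContainer as a hypothetical stand-in:

    class DockerContainer:
        """Hypothetical stand-in for docker.models.containers.Container."""
        id = "abc123"

    container = DockerContainer()
    # getattr with a default never raises, even when the attribute is absent
    container_id = getattr(container, "id", "unknown")
    settings = getattr(container, "network_settings", None)
    ports = getattr(settings, "ports", None) if settings else None
    if ports:
        print(ports)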
@@ -43,7 +43,13 @@ def process_app_removal_request(
     deployment_record = laconic.get_record(
         app_removal_request.attributes.deployment, require=True
     )
+    assert deployment_record is not None  # require=True ensures this
+    assert deployment_record.attributes is not None

     dns_record = laconic.get_record(deployment_record.attributes.dns, require=True)
+    assert dns_record is not None  # require=True ensures this
+    assert dns_record.attributes is not None
+
     deployment_dir = os.path.join(
         deployment_parent_dir, dns_record.attributes.name.lower()
     )
@@ -57,17 +63,20 @@ def process_app_removal_request(

     # Or of the original deployment request.
     if not matched_owner and deployment_record.attributes.request:
-        matched_owner = match_owner(
-            app_removal_request,
-            laconic.get_record(deployment_record.attributes.request, require=True),
-        )
+        original_request = laconic.get_record(
+            deployment_record.attributes.request, require=True
+        )
+        assert original_request is not None  # require=True ensures this
+        matched_owner = match_owner(app_removal_request, original_request)

     if matched_owner:
-        main_logger.log("Matched deployment ownership:", matched_owner)
+        main_logger.log(f"Matched deployment ownership: {matched_owner}")
     else:
+        deployment_id = deployment_record.id if deployment_record else "unknown"
+        request_id = app_removal_request.id if app_removal_request else "unknown"
         raise Exception(
             "Unable to confirm ownership of deployment %s for removal request %s"
-            % (deployment_record.id, app_removal_request.id)
+            % (deployment_id, request_id)
         )

     # TODO(telackey): Call the function directly. The easiest way to build
@@ -80,13 +89,18 @@ def process_app_removal_request(
     result = subprocess.run(down_command)
     result.check_returncode()

+    deployer_name = (
+        webapp_deployer_record.names[0]
+        if webapp_deployer_record and webapp_deployer_record.names
+        else ""
+    )
     removal_record = {
         "record": {
             "type": "ApplicationDeploymentRemovalRecord",
             "version": "1.0.0",
-            "request": app_removal_request.id,
-            "deployment": deployment_record.id,
-            "deployer": webapp_deployer_record.names[0],
+            "request": app_removal_request.id if app_removal_request else "",
+            "deployment": deployment_record.id if deployment_record else "",
+            "deployer": deployer_name,
         }
     }
@@ -96,11 +110,11 @@ def process_app_removal_request(
     laconic.publish(removal_record)

     if delete_names:
-        if deployment_record.names:
+        if deployment_record and deployment_record.names:
             for name in deployment_record.names:
                 laconic.delete_name(name)

-        if dns_record.names:
+        if dns_record and dns_record.names:
             for name in dns_record.names:
                 laconic.delete_name(name)
@@ -224,6 +238,8 @@ def command(  # noqa: C901
         laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file
     )
     deployer_record = laconic.get_record(lrn, require=True)
+    assert deployer_record is not None  # require=True ensures this
+    assert deployer_record.attributes is not None
     payment_address = deployer_record.attributes.paymentAddress
     main_logger.log(f"Payment address: {payment_address}")
@@ -236,6 +252,7 @@ def command(  # noqa: C901
         sys.exit(2)

     # Find deployment removal requests.
+    requests = []
     # single request
     if request_id:
         main_logger.log(f"Retrieving request {request_id}...")
@@ -259,32 +276,39 @@ def command(  # noqa: C901
         main_logger.log(f"Loading known requests from {state_file}...")
         previous_requests = load_known_requests(state_file)

-    requests.sort(key=lambda r: r.createTime)
-    requests.reverse()
+    # Filter out None values and sort by createTime
+    valid_requests = [r for r in requests if r is not None]
+    valid_requests.sort(key=lambda r: r.createTime if r else "")
+    valid_requests.reverse()

     # Find deployments.
     named_deployments = {}
     main_logger.log("Discovering app deployments...")
     for d in laconic.app_deployments(all=False):
-        named_deployments[d.id] = d
+        if d and d.id:
+            named_deployments[d.id] = d

     # Find removal requests.
     removals_by_deployment = {}
     removals_by_request = {}
     main_logger.log("Discovering deployment removals...")
     for r in laconic.app_deployment_removals():
-        if r.attributes.deployment:
+        if r and r.attributes and r.attributes.deployment:
             # TODO: should we handle CRNs?
             removals_by_deployment[r.attributes.deployment] = r

     one_per_deployment = {}
-    for r in requests:
+    for r in valid_requests:
+        if not r or not r.attributes:
+            continue
         if not r.attributes.deployment:
+            r_id = r.id if r else "unknown"
             main_logger.log(
-                f"Skipping removal request {r.id} since it was a cancellation."
+                f"Skipping removal request {r_id} since it was a cancellation."
             )
         elif r.attributes.deployment in one_per_deployment:
-            main_logger.log(f"Skipping removal request {r.id} since it was superseded.")
+            r_id = r.id if r else "unknown"
+            main_logger.log(f"Skipping removal request {r_id} since it was superseded.")
         else:
             one_per_deployment[r.attributes.deployment] = r
@@ -25,6 +25,7 @@ import uuid
 import yaml

 from enum import Enum
+from typing import Any, List, Optional, TextIO

 from stack_orchestrator.deploy.webapp.registry_mutex import registry_mutex
@@ -41,27 +42,35 @@ AUCTION_KIND_PROVIDER = "provider"


 class AttrDict(dict):
-    def __init__(self, *args, **kwargs):
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
         super(AttrDict, self).__init__(*args, **kwargs)
         self.__dict__ = self

-    def __getattribute__(self, attr):
+    def __getattribute__(self, attr: str) -> Any:
         __dict__ = super(AttrDict, self).__getattribute__("__dict__")
         if attr in __dict__:
             v = super(AttrDict, self).__getattribute__(attr)
             if isinstance(v, dict):
                 return AttrDict(v)
             return v
+        return super(AttrDict, self).__getattribute__(attr)
+
+    def __getattr__(self, attr: str) -> Any:
+        # This method is called when attribute is not found
+        # Return None for missing attributes (matches original behavior)
+        return None


 class TimedLogger:
-    def __init__(self, id="", file=None):
+    def __init__(self, id: str = "", file: Optional[TextIO] = None) -> None:
         self.start = datetime.datetime.now()
         self.last = self.start
         self.id = id
         self.file = file

-    def log(self, msg, show_step_time=True, show_total_time=False):
+    def log(
+        self, msg: str, show_step_time: bool = True, show_total_time: bool = False
+    ) -> None:
         prefix = f"{datetime.datetime.utcnow()} - {self.id}"
         if show_step_time:
             prefix += f" - {datetime.datetime.now() - self.last} (step)"
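With __getattr__ defined (Python calls it only after normal lookup raises AttributeError), missing keys read as None instead of raising, which is what callers throughout this file rely on; expected behavior, roughly:

    d = AttrDict({"name": "demo", "meta": {"tag": "x"}})
    print(d.name)      # "demo" - dict keys readable as attributes
    print(d.meta.tag)  # nested dicts are re-wrapped as AttrDict
    print(d.missing)   # None - __getattr__ fallback, no AttributeError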
@@ -79,7 +88,7 @@ def load_known_requests(filename):
     return {}


-def logged_cmd(log_file, *vargs):
+def logged_cmd(log_file: Optional[TextIO], *vargs: str) -> str:
     result = None
     try:
         if log_file:
@@ -88,17 +97,22 @@ def logged_cmd(log_file, *vargs):
         result.check_returncode()
         return result.stdout.decode()
     except Exception as err:
-        if result:
-            print(result.stderr.decode(), file=log_file)
-        else:
-            print(str(err), file=log_file)
+        if log_file:
+            if result:
+                print(result.stderr.decode(), file=log_file)
+            else:
+                print(str(err), file=log_file)
         raise err


-def match_owner(recordA, *records):
+def match_owner(
+    recordA: Optional[AttrDict], *records: Optional[AttrDict]
+) -> Optional[str]:
+    if not recordA or not recordA.owners:
+        return None
     for owner in recordA.owners:
         for otherRecord in records:
-            if owner in otherRecord.owners:
+            if otherRecord and otherRecord.owners and owner in otherRecord.owners:
                 return owner
     return None
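A quick check of the new match_owner guards, using AttrDict records with illustrative values:

    a = AttrDict({"owners": ["alice", "bob"]})
    b = AttrDict({"owners": ["carol", "bob"]})
    print(match_owner(a, b))     # "bob" - first shared owner
    print(match_owner(None, b))  # None - the new guard handles missing records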
@@ -226,25 +240,27 @@ class LaconicRegistryClient:
         ]

         # Most recent records first
-        results.sort(key=lambda r: r.createTime)
+        results.sort(key=lambda r: r.createTime or "")
         results.reverse()
         self._add_to_cache(results)

         return results

-    def _add_to_cache(self, records):
+    def _add_to_cache(self, records: List[AttrDict]) -> None:
         if not records:
             return

         for p in records:
-            self.cache["name_or_id"][p.id] = p
+            if p.id:
+                self.cache["name_or_id"][p.id] = p
             if p.names:
                 for lrn in p.names:
                     self.cache["name_or_id"][lrn] = p
             if p.attributes and p.attributes.type:
-                if p.attributes.type not in self.cache:
-                    self.cache[p.attributes.type] = []
-                self.cache[p.attributes.type].append(p)
+                attr_type = p.attributes.type
+                if attr_type not in self.cache:
+                    self.cache[attr_type] = []
+                self.cache[attr_type].append(p)

     def resolve(self, name):
         if not name:
@@ -556,26 +572,36 @@ def determine_base_container(clone_dir, app_type="webapp"):
     return base_container


-def build_container_image(app_record, tag, extra_build_args=None, logger=None):
+def build_container_image(
+    app_record: Optional[AttrDict],
+    tag: str,
+    extra_build_args: Optional[List[str]] = None,
+    logger: Optional[TimedLogger] = None,
+) -> None:
+    if app_record is None:
+        raise ValueError("app_record cannot be None")
     if extra_build_args is None:
         extra_build_args = []
     tmpdir = tempfile.mkdtemp()

     # TODO: determine if this code could be calling into the Python git
     # library like setup-repositories
+    log_file = logger.file if logger else None
     try:
         record_id = app_record["id"]
         ref = app_record.attributes.repository_ref
         repo = random.choice(app_record.attributes.repository)
         clone_dir = os.path.join(tmpdir, record_id)

-        logger.log(f"Cloning repository {repo} to {clone_dir} ...")
+        if logger:
+            logger.log(f"Cloning repository {repo} to {clone_dir} ...")
         # Set github credentials if present running a command like:
         # git config --global url."https://${TOKEN}:@github.com/".insteadOf
         # "https://github.com/"
         github_token = os.environ.get("DEPLOYER_GITHUB_TOKEN")
         if github_token:
-            logger.log("Github token detected, setting it in the git environment")
+            if logger:
+                logger.log("Github token detected, setting it in the git environment")
             git_config_args = [
                 "git",
                 "config",
@@ -583,9 +609,7 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None):
                 f"url.https://{github_token}:@github.com/.insteadOf",
                 "https://github.com/",
             ]
-            result = subprocess.run(
-                git_config_args, stdout=logger.file, stderr=logger.file
-            )
+            result = subprocess.run(git_config_args, stdout=log_file, stderr=log_file)
             result.check_returncode()
         if ref:
             # TODO: Determing branch or hash, and use depth 1 if we can.
@@ -596,30 +620,32 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None):
                 subprocess.check_call(
                     ["git", "clone", repo, clone_dir],
                     env=git_env,
-                    stdout=logger.file,
-                    stderr=logger.file,
+                    stdout=log_file,
+                    stderr=log_file,
                 )
             except Exception as e:
-                logger.log(f"git clone failed. Is the repository {repo} private?")
+                if logger:
+                    logger.log(f"git clone failed. Is the repository {repo} private?")
                 raise e
             try:
                 subprocess.check_call(
                     ["git", "checkout", ref],
                     cwd=clone_dir,
                     env=git_env,
-                    stdout=logger.file,
-                    stderr=logger.file,
+                    stdout=log_file,
+                    stderr=log_file,
                 )
             except Exception as e:
-                logger.log(f"git checkout failed. Does ref {ref} exist?")
+                if logger:
+                    logger.log(f"git checkout failed. Does ref {ref} exist?")
                 raise e
         else:
             # TODO: why is this code different vs the branch above (run vs check_call,
             # and no prompt disable)?
             result = subprocess.run(
                 ["git", "clone", "--depth", "1", repo, clone_dir],
-                stdout=logger.file,
-                stderr=logger.file,
+                stdout=log_file,
+                stderr=log_file,
             )
             result.check_returncode()
@@ -627,7 +653,8 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None):
             clone_dir, app_record.attributes.app_type
         )

-        logger.log("Building webapp ...")
+        if logger:
+            logger.log("Building webapp ...")
         build_command = [
             sys.argv[0],
             "--verbose",
@@ -643,10 +670,10 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None):
             build_command.append("--extra-build-args")
             build_command.append(" ".join(extra_build_args))

-        result = subprocess.run(build_command, stdout=logger.file, stderr=logger.file)
+        result = subprocess.run(build_command, stdout=log_file, stderr=log_file)
         result.check_returncode()
     finally:
-        logged_cmd(logger.file, "rm", "-rf", tmpdir)
+        logged_cmd(log_file, "rm", "-rf", tmpdir)


 def push_container_image(deployment_dir, logger):
@@ -809,8 +836,12 @@ def skip_by_tag(r, include_tags, exclude_tags):


 def confirm_payment(
-    laconic: LaconicRegistryClient, record, payment_address, min_amount, logger
-):
+    laconic: LaconicRegistryClient,
+    record: AttrDict,
+    payment_address: str,
+    min_amount: int,
+    logger: TimedLogger,
+) -> bool:
     req_owner = laconic.get_owner(record)
     if req_owner == payment_address:
         # No need to confirm payment if the sender and recipient are the same account.
@@ -846,7 +877,8 @@ def confirm_payment(
         )
         return False

-    pay_denom = "".join([i for i in tx.amount if not i.isdigit()])
+    tx_amount = tx.amount or ""
+    pay_denom = "".join([i for i in tx_amount if not i.isdigit()])
     if pay_denom != "alnt":
         logger.log(
             f"{record.id}: {pay_denom} in tx {tx.hash} is not an expected "
@@ -854,7 +886,7 @@ def confirm_payment(
         )
         return False

-    pay_amount = int("".join([i for i in tx.amount if i.isdigit()]))
+    pay_amount = int("".join([i for i in tx_amount if i.isdigit()]) or "0")
     if pay_amount < min_amount:
         logger.log(
             f"{record.id}: payment amount {tx.amount} is less than minimum {min_amount}"
@@ -870,7 +902,8 @@ def confirm_payment(
     used_request = laconic.get_record(used[0].attributes.request, require=True)

     # Check that payment was used for deployment of same application
-    if record.attributes.application != used_request.attributes.application:
+    used_app = used_request.attributes.application if used_request else None
+    if record.attributes.application != used_app:
         logger.log(
             f"{record.id}: payment {tx.hash} already used on a different "
             f"application deployment {used}"
@@ -890,8 +923,12 @@ def confirm_payment(


 def confirm_auction(
-    laconic: LaconicRegistryClient, record, deployer_lrn, payment_address, logger
-):
+    laconic: LaconicRegistryClient,
+    record: AttrDict,
+    deployer_lrn: str,
+    payment_address: str,
+    logger: TimedLogger,
+) -> bool:
     auction_id = record.attributes.auction
     auction = laconic.get_auction(auction_id)
@@ -906,7 +943,9 @@ def confirm_auction(
     auction_app = laconic.get_record(
         auction_records_by_id[0].attributes.application, require=True
     )
-    if requested_app.id != auction_app.id:
+    requested_app_id = requested_app.id if requested_app else None
+    auction_app_id = auction_app.id if auction_app else None
+    if requested_app_id != auction_app_id:
         logger.log(
             f"{record.id}: requested application {record.attributes.application} "
             f"does not match application from auction record "
@@ -17,4 +17,4 @@ from stack_orchestrator.command_types import CommandOptions


 class opts:
-    o: CommandOptions = None
+    o: CommandOptions = None  # type: ignore[assignment]  # Set at runtime
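If changing call sites were acceptable, an alternative to the type: ignore above would be declaring the attribute Optional, at the cost of a None check everywhere it is read; a sketch (CommandOptions is already imported in this module):

    from typing import Optional

    class opts:
        o: Optional[CommandOptions] = None  # callers must then check for None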
@@ -36,7 +36,9 @@ from stack_orchestrator.util import error_exit
 @click.pass_context
 def command(ctx, stack_locator, git_ssh, check_only, pull):
     """Optionally resolve then git clone a repository with stack definitions."""
-    dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+    dev_root_path = os.path.expanduser(
+        str(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+    )
     if not opts.o.quiet:
         print(f"Dev Root is: {dev_root_path}")
     try:
@@ -20,7 +20,8 @@ import os
 import sys
 from decouple import config
 import git
-from git.exc import GitCommandError
+from git.exc import GitCommandError, InvalidGitRepositoryError
+from typing import Any
 from tqdm import tqdm
 import click
 import importlib.resources
@@ -48,7 +49,7 @@ def is_git_repo(path):
     try:
         _ = git.Repo(path).git_dir
         return True
-    except git.exc.InvalidGitRepositoryError:
+    except InvalidGitRepositoryError:
         return False
@@ -70,10 +71,14 @@ def host_and_path_for_repo(fully_qualified_repo):
     # Legacy unqualified repo means github
     if len(repo_host_split) == 2:
         return "github.com", "/".join(repo_host_split), repo_branch
+    elif len(repo_host_split) == 3:
+        # First part is the host
+        return repo_host_split[0], "/".join(repo_host_split[1:]), repo_branch
     else:
-        if len(repo_host_split) == 3:
-            # First part is the host
-            return repo_host_split[0], "/".join(repo_host_split[1:]), repo_branch
+        raise ValueError(
+            f"Invalid repository format: {fully_qualified_repo}. "
+            "Expected format: host/org/repo or org/repo"
+        )


 # See: https://stackoverflow.com/questions/18659425/get-git-current-branch-tag-name
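How the reshaped branches behave, with illustrative inputs (repo_branch comes from an optional "@branch" suffix parsed earlier in the function):

    host, path, branch = host_and_path_for_repo("cerc-io/test-repo")
    # -> ("github.com", "cerc-io/test-repo", ...)
    host, path, branch = host_and_path_for_repo("git.example.com/cerc-io/test-repo")
    # -> ("git.example.com", "cerc-io/test-repo", ...)
    host_and_path_for_repo("a/b/c/d")  # now raises ValueError instead of returning None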
@@ -161,10 +166,12 @@ def process_repo(
             f"into {full_filesystem_repo_path}"
         )
         if not opts.o.dry_run:
+            # Cast to Any to work around GitPython's incomplete type stubs
+            progress: Any = None if opts.o.quiet else GitProgress()
             git.Repo.clone_from(
                 full_github_repo_path,
                 full_filesystem_repo_path,
-                progress=None if opts.o.quiet else GitProgress(),
+                progress=progress,
             )
         else:
             print("(git clone skipped)")
@@ -244,7 +251,7 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
         )
     else:
         dev_root_path = os.path.expanduser(
-            config("CERC_REPO_BASE_DIR", default="~/cerc")
+            str(config("CERC_REPO_BASE_DIR", default="~/cerc"))
         )

     if not quiet:
@@ -288,5 +295,5 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
     for repo in repos:
         try:
             process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, repo)
-        except git.exc.GitCommandError as error:
+        except GitCommandError as error:
             error_exit(f"\n******* git command returned error exit status:\n{error}")
@@ -19,7 +19,7 @@ import sys
 import ruamel.yaml
 from pathlib import Path
 from dotenv import dotenv_values
-from typing import Mapping, Set, List
+from typing import Mapping, NoReturn, Optional, Set, List
 from stack_orchestrator.constants import stack_file_name, deployment_file_name
@@ -56,7 +56,7 @@ def get_dev_root_path(ctx):
         )
     else:
         dev_root_path = os.path.expanduser(
-            config("CERC_REPO_BASE_DIR", default="~/cerc")
+            str(config("CERC_REPO_BASE_DIR", default="~/cerc"))
         )
     return dev_root_path
@@ -161,6 +161,7 @@ def resolve_job_compose_file(stack, job_name: str):

 def get_pod_file_path(stack, parsed_stack, pod_name: str):
     pods = parsed_stack["pods"]
+    result = None
     if type(pods[0]) is str:
         result = resolve_compose_file(stack, pod_name)
     else:
@@ -207,6 +208,7 @@ def get_pod_script_paths(parsed_stack, pod_name: str):

 def pod_has_scripts(parsed_stack, pod_name: str):
     pods = parsed_stack["pods"]
+    result = False
     if type(pods[0]) is str:
         result = False
     else:
@@ -281,15 +283,15 @@ def global_options2(ctx):
     return ctx.parent.obj


-def error_exit(s):
+def error_exit(s) -> NoReturn:
     print(f"ERROR: {s}")
     sys.exit(1)


-def warn_exit(s):
+def warn_exit(s) -> NoReturn:
     print(f"WARN: {s}")
     sys.exit(0)


-def env_var_map_from_file(file: Path) -> Mapping[str, str]:
+def env_var_map_from_file(file: Path) -> Mapping[str, Optional[str]]:
     return dotenv_values(file)
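Since dotenv_values() can map a key to None, the widened Mapping[str, Optional[str]] return type is the honest one; callers that want only set values could use a filtered variant like this hypothetical helper:

    from pathlib import Path
    from typing import Dict
    from dotenv import dotenv_values

    def env_var_map_from_file_strict(file: Path) -> Dict[str, str]:
        # Hypothetical helper: drop keys whose value parsed as None
        return {k: v for k, v in dotenv_values(file).items() if v is not None}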