diff --git a/stack_orchestrator/data/k8s/components/ingress/ingress-caddy-kind-deploy.yaml b/stack_orchestrator/data/k8s/components/ingress/ingress-caddy-kind-deploy.yaml
new file mode 100644
index 00000000..632dcc05
--- /dev/null
+++ b/stack_orchestrator/data/k8s/components/ingress/ingress-caddy-kind-deploy.yaml
@@ -0,0 +1,260 @@
+# Caddy Ingress Controller for kind
+# Based on: https://github.com/caddyserver/ingress
+# Provides automatic HTTPS with Let's Encrypt
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: caddy-system
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: caddy-ingress-controller
+  namespace: caddy-system
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: caddy-ingress-controller
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - endpoints
+      - nodes
+      - pods
+      - namespaces
+      - services
+    verbs:
+      - list
+      - watch
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - list
+      - watch
+      - get
+      - create
+      - update
+      - delete
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses/status
+    verbs:
+      - update
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingressclasses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - coordination.k8s.io
+    resources:
+      - leases
+    verbs:
+      - get
+      - create
+      - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: caddy-ingress-controller
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: caddy-ingress-controller
+subjects:
+  - kind: ServiceAccount
+    name: caddy-ingress-controller
+    namespace: caddy-system
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: caddy-ingress-controller-configmap
+  namespace: caddy-system
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+data:
+  # Caddy global options
+  acmeCA: "https://acme-v02.api.letsencrypt.org/directory"
+  email: ""
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: caddy-ingress-controller
+  namespace: caddy-system
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+    app.kubernetes.io/component: controller
+spec:
+  type: NodePort
+  ports:
+    - name: http
+      port: 80
+      targetPort: http
+      protocol: TCP
+    - name: https
+      port: 443
+      targetPort: https
+      protocol: TCP
+  selector:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+    app.kubernetes.io/component: controller
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: caddy-ingress-controller
+  namespace: caddy-system
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+    app.kubernetes.io/component: controller
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: caddy-ingress-controller
+      app.kubernetes.io/instance: caddy-ingress
+      app.kubernetes.io/component: controller
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: caddy-ingress-controller
+        app.kubernetes.io/instance: caddy-ingress
+        app.kubernetes.io/component: controller
+    spec:
+      serviceAccountName: caddy-ingress-controller
+      terminationGracePeriodSeconds: 60
+      nodeSelector:
+        ingress-ready: "true"
+        kubernetes.io/os: linux
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Equal
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/control-plane
+          operator: Equal
+      containers:
+        - name: caddy-ingress-controller
+          image: caddy/ingress:latest
+          imagePullPolicy: IfNotPresent
+          ports:
+            - name: http
+              containerPort: 80
+              hostPort: 80
+              protocol: TCP
+            - name: https
+              containerPort: 443
+              hostPort: 443
+              protocol: TCP
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          args:
+            - -config-map=caddy-system/caddy-ingress-controller-configmap
+            - -class-name=caddy
+          resources:
+            requests:
+              cpu: 100m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 512Mi
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: 9765
+            initialDelaySeconds: 3
+            periodSeconds: 10
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: 9765
+            initialDelaySeconds: 3
+            periodSeconds: 10
+          securityContext:
+            allowPrivilegeEscalation: true
+            capabilities:
+              add:
+                - NET_BIND_SERVICE
+              drop:
+                - ALL
+            runAsUser: 0
+            runAsGroup: 0
+          volumeMounts:
+            - name: caddy-data
+              mountPath: /data
+            - name: caddy-config
+              mountPath: /config
+      volumes:
+        - name: caddy-data
+          emptyDir: {}
+        - name: caddy-config
+          emptyDir: {}
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  name: caddy
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "true"
+spec:
+  controller: caddy.io/ingress-controller
diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py
index 87130c0d..6f3ed83d 100644
--- a/stack_orchestrator/deploy/deploy.py
+++ b/stack_orchestrator/deploy/deploy.py
@@ -42,6 +42,7 @@ from stack_orchestrator.deploy.deployment_context import DeploymentContext
 from stack_orchestrator.deploy.deployment_create import create as deployment_create
 from stack_orchestrator.deploy.deployment_create import init as deployment_init
 from stack_orchestrator.deploy.deployment_create import setup as deployment_setup
+from stack_orchestrator.deploy.k8s import k8s_command
 
 
 @click.group()
@@ -54,6 +55,10 @@ from stack_orchestrator.deploy.deployment_create import setup as deployment_setu
 def command(ctx, include, exclude, env_file, cluster, deploy_to):
     '''deploy a stack'''
 
+    # k8s subcommand doesn't require a stack
+    if ctx.invoked_subcommand == "k8s":
+        return
+
     # Although in theory for some subcommands (e.g. deploy create) the stack can be inferred,
     # Click doesn't allow us to know that here, so we make providing the stack mandatory
     stack = global_options2(ctx).stack
@@ -486,3 +491,4 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en
 command.add_command(deployment_init)
 command.add_command(deployment_create)
 command.add_command(deployment_setup)
+command.add_command(k8s_command.command, "k8s")
diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py
index b08b0c34..7afcb40d 100644
--- a/stack_orchestrator/deploy/deployment_create.py
+++ b/stack_orchestrator/deploy/deployment_create.py
@@ -368,7 +368,9 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
         spec_file_content.update({"config": merged_config})
 
     ports = _get_mapped_ports(stack, map_ports_to_host)
-    spec_file_content.update({"network": {"ports": ports}})
+    orig_network = spec_file_content.get("network", {})
+    orig_network["ports"] = ports
+    spec_file_content["network"] = orig_network
 
     named_volumes = _get_named_volumes(stack)
     if named_volumes:
diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py
index be1b2e3d..7cd4306b 100644
--- a/stack_orchestrator/deploy/k8s/cluster_info.py
+++ b/stack_orchestrator/deploy/k8s/cluster_info.py
@@ -34,8 +34,8 @@ DEFAULT_VOLUME_RESOURCES = Resources({
 })
 
 DEFAULT_CONTAINER_RESOURCES = Resources({
-    "reservations": {"cpus": "0.1", "memory": "200M"},
-    "limits": {"cpus": "1.0", "memory": "2000M"},
+    "reservations": {"cpus": "1.0", "memory": "2000M"},
+    "limits": {"cpus": "4.0", "memory": "8000M"},
 })
 
 
@@ -90,23 +90,30 @@ class ClusterInfo:
                 for raw_port in [str(p) for p in service_info["ports"]]:
                     if opts.o.debug:
                         print(f"service port: {raw_port}")
-                    if ":" in raw_port:
-                        parts = raw_port.split(":")
+                    # Parse protocol suffix (e.g., "8001/udp" -> port=8001, protocol=UDP)
+                    protocol = "TCP"
+                    port_str = raw_port
+                    if "/" in raw_port:
+                        port_str, proto = raw_port.rsplit("/", 1)
+                        protocol = proto.upper()
+                    if ":" in port_str:
+                        parts = port_str.split(":")
                         if len(parts) != 2:
                             raise Exception(f"Invalid port definition: {raw_port}")
                         node_port = int(parts[0])
                         pod_port = int(parts[1])
                     else:
                         node_port = None
-                        pod_port = int(raw_port)
+                        pod_port = int(port_str)
                     service = client.V1Service(
-                        metadata=client.V1ObjectMeta(name=f"{self.app_name}-nodeport-{pod_port}"),
+                        metadata=client.V1ObjectMeta(name=f"{self.app_name}-nodeport-{pod_port}-{protocol.lower()}"),
                         spec=client.V1ServiceSpec(
                             type="NodePort",
                             ports=[client.V1ServicePort(
                                 port=pod_port,
                                 target_port=pod_port,
-                                node_port=node_port
+                                node_port=node_port,
+                                protocol=protocol
                             )],
                             selector={"app": self.app_name}
                         )
@@ -326,14 +333,26 @@ class ClusterInfo:
                 container_name = service_name
                 service_info = services[service_name]
                 image = service_info["image"]
+                container_ports = []
                 if "ports" in service_info:
-                    port = int(service_info["ports"][0])
+                    for raw_port in [str(p) for p in service_info["ports"]]:
+                        # Parse protocol suffix (e.g., "8001/udp" -> port=8001, protocol=UDP)
+                        protocol = "TCP"
+                        port_str = raw_port
+                        if "/" in raw_port:
+                            port_str, proto = raw_port.rsplit("/", 1)
+                            protocol = proto.upper()
+                        # Handle host:container port mapping - use container port
+                        if ":" in port_str:
+                            port_str = port_str.split(":")[-1]
+                        port = int(port_str)
+                        container_ports.append(client.V1ContainerPort(container_port=port, protocol=protocol))
                 if opts.o.debug:
                     print(f"image: {image}")
-                    print(f"service port: {port}")
+                    print(f"service ports: {container_ports}")
                 merged_envs = merge_envs(
                     envs_from_compose_file(
-                        service_info["environment"]), self.environment_variables.map
+                        service_info["environment"], self.environment_variables.map), self.environment_variables.map
                 ) if "environment" in service_info else self.environment_variables.map
                 envs = envs_from_environment_variables_map(merged_envs)
                 if opts.o.debug:
@@ -345,12 +364,24 @@ class ClusterInfo:
                     self.spec.get_image_registry(), self.app_name) if self.spec.get_image_registry() is not None else image
                 volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name)
+                # Handle command/entrypoint from compose file
+                # In docker-compose: entrypoint -> k8s command, command -> k8s args
+                container_command = None
+                container_args = None
+                if "entrypoint" in service_info:
+                    entrypoint = service_info["entrypoint"]
+                    container_command = entrypoint if isinstance(entrypoint, list) else [entrypoint]
+                if "command" in service_info:
+                    cmd = service_info["command"]
+                    container_args = cmd if isinstance(cmd, list) else cmd.split()
                 container = client.V1Container(
                     name=container_name,
                     image=image_to_use,
                     image_pull_policy=image_pull_policy,
+                    command=container_command,
+                    args=container_args,
                     env=envs,
-                    ports=[client.V1ContainerPort(container_port=port)],
+                    ports=container_ports if container_ports else None,
                     volume_mounts=volume_mounts,
                     security_context=client.V1SecurityContext(
                         privileged=self.spec.get_privileged(),
diff --git a/stack_orchestrator/deploy/k8s/helpers.py b/stack_orchestrator/deploy/k8s/helpers.py
index 80fb9c6a..76742f11 100644
--- a/stack_orchestrator/deploy/k8s/helpers.py
+++ b/stack_orchestrator/deploy/k8s/helpers.py
@@ -165,7 +165,8 @@ def volumes_for_pod_files(parsed_pod_files, spec, app_name):
             volumes = parsed_pod_file["volumes"]
             for volume_name in volumes.keys():
                 if volume_name in spec.get_configmaps():
-                    config_map = client.V1ConfigMapVolumeSource(name=f"{app_name}-{volume_name}")
+                    # Set defaultMode=0o755 to make scripts executable
+                    config_map = client.V1ConfigMapVolumeSource(name=f"{app_name}-{volume_name}", default_mode=0o755)
                     volume = client.V1Volume(name=volume_name, config_map=config_map)
                     result.append(volume)
                 else:
@@ -268,23 +269,34 @@ def merge_envs(a: Mapping[str, str], b: Mapping[str, str]) -> Mapping[str, str]:
     return result
 
 
-def _expand_shell_vars(raw_val: str) -> str:
-    # could be: or ${} or ${:-}
-    # TODO: implement support for variable substitution and default values
-    # if raw_val is like ${} print a warning and substitute an empty string
-    # otherwise return raw_val
-    match = re.search(r"^\$\{(.*)\}$", raw_val)
+def _expand_shell_vars(raw_val: str, env_map: Mapping[str, str] = None) -> str:
+    # Expand docker-compose style variable substitution:
+    # ${VAR} - use VAR value or empty string
+    # ${VAR:-default} - use VAR value or default if unset/empty
+    # ${VAR-default} - use VAR value or default if unset
+    if env_map is None:
+        env_map = {}
+    if raw_val is None:
+        return ""
+    match = re.search(r"^\$\{([^}]+)\}$", raw_val)
     if match:
-        print(f"WARNING: found unimplemented environment variable substitution: {raw_val}")
-    else:
-        return raw_val
+        inner = match.group(1)
+        # Check for default value syntax
+        if ":-" in inner:
+            var_name, default_val = inner.split(":-", 1)
+            return env_map.get(var_name, "") or default_val
+        elif "-" in inner:
+            var_name, default_val = inner.split("-", 1)
+            return env_map.get(var_name, default_val)
+        else:
+            return env_map.get(inner, "")
+    return raw_val
 
 
-# TODO: handle the case where the same env var is defined in multiple places
-def envs_from_compose_file(compose_file_envs: Mapping[str, str]) -> Mapping[str, str]:
+def envs_from_compose_file(compose_file_envs: Mapping[str, str], env_map: Mapping[str, str] = None) -> Mapping[str, str]:
     result = {}
     for env_var, env_val in compose_file_envs.items():
-        expanded_env_val = _expand_shell_vars(env_val)
+        expanded_env_val = _expand_shell_vars(env_val, env_map)
         result.update({env_var: expanded_env_val})
     return result
diff --git a/stack_orchestrator/deploy/k8s/k8s_command.py b/stack_orchestrator/deploy/k8s/k8s_command.py
new file mode 100644
index 00000000..506a34fe
--- /dev/null
+++ b/stack_orchestrator/deploy/k8s/k8s_command.py
@@ -0,0 +1,43 @@
+# Copyright © 2024 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import click
+
+from stack_orchestrator.deploy.k8s.helpers import get_kind_cluster
+
+
+@click.group()
+@click.pass_context
+def command(ctx):
+    '''k8s cluster management commands'''
+    pass
+
+
+@command.group()
+@click.pass_context
+def list(ctx):
+    '''list k8s resources'''
+    pass
+
+
+@list.command()
+@click.pass_context
+def cluster(ctx):
+    '''Show the existing kind cluster'''
+    existing_cluster = get_kind_cluster()
+    if existing_cluster:
+        print(existing_cluster)
+    else:
+        print("No cluster found")