diff --git a/registry/nboyers/.gitignore b/registry/nboyers/.gitignore new file mode 100644 index 000000000..9a58485fa --- /dev/null +++ b/registry/nboyers/.gitignore @@ -0,0 +1,41 @@ +# Local and OS files +.DS_Store +Thumbs.db +*.log +*.tmp +*.swp +*.bak + +# Terraform +.terraform/ +.terraform.lock.hcl +terraform.tfstate +terraform.tfstate.backup +crash.log + +# Node / Bun / Python / other tool artifacts +node_modules/ +bun.lockb +package-lock.json +__pycache__/ +*.pyc + +# Cloud credentials and keys +*.pem +*.key +*.p12 +*.json +*.env +.envrc +aws-credentials +gcp.json +azure-creds.json + +# Archives +*.zip +*.tar.gz +*.tgz + +# Workspace artifacts +workspace/ +output/ diff --git a/registry/nboyers/.images/avatar.png b/registry/nboyers/.images/avatar.png new file mode 100644 index 000000000..546fbd895 Binary files /dev/null and b/registry/nboyers/.images/avatar.png differ diff --git a/registry/nboyers/README.md b/registry/nboyers/README.md new file mode 100644 index 000000000..57dc2bca3 --- /dev/null +++ b/registry/nboyers/README.md @@ -0,0 +1,14 @@ +--- +display_name: "Noah Boyers" +bio: "Cloud & DevOps engineer with an MBA, building scalable multi-cloud infrastructure." +avatar: "./.images/avatar.png" +github: "noahboyers" +linkedin: "https://www.linkedin.com/in/nboyers" +website: "https://nobosoftware.com" +support_email: "hello@nobosoftware.com" +status: "community" +--- + +# Noah Boyers + +Cloud and DevOps engineer focused on scalable, secure, and automated infrastructure across AWS, Azure, and GCP. diff --git a/registry/nboyers/templates/cloud-dev/README.md b/registry/nboyers/templates/cloud-dev/README.md new file mode 100644 index 000000000..5328fb409 --- /dev/null +++ b/registry/nboyers/templates/cloud-dev/README.md @@ -0,0 +1,72 @@ +--- +display_name: "Cloud DevOps Workspace" +description: "A multi-cloud DevOps workspace that runs on Amazon EKS and provides authenticated access to AWS, Azure, and GCP." +icon: "https://raw.githubusercontent.com/coder/coder-icons/main/icons/cloud-devops.svg" +tags: ["devops", "kubernetes", "aws", "eks", "multi-cloud", "terraform", "cdk", "pulumi"] +--- + +# Cloud DevOps Workspace + +A secure, company-standard DevOps environment for platform and cloud engineers. + +This template deploys workspaces **into an existing Amazon EKS cluster** and provides developers with tools and credentials to work with **AWS, Azure, and GCP** from inside their workspace. + +Supports multiple Infrastructure-as-Code frameworks — **Terraform**, **AWS CDK**, and **Pulumi** — for flexible, multi-cloud development. 
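+
+As a quick orientation, one possible publish flow with the Coder CLI is sketched below (assumptions: the CLI is installed and authenticated against your deployment, `my-eks-cluster` is a placeholder cluster name, and exact flags may differ between CLI versions):
+
+```bash
+# Publish the template from a checkout of this registry (hypothetical flow)
+cd registry/nboyers/templates/cloud-dev
+coder templates push cloud-dev \
+  --directory . \
+  --variable host_cluster_name=my-eks-cluster
+```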
+ +## Features + +- **Multi-Cloud Ready** — authenticate to AWS, Azure, or GCP from a single workspace +- **Runs on EKS** — leverages existing Kubernetes infrastructure for scaling and security +- **IaC Tools Included** — Terraform, Terragrunt, CDK, Pulumi, tfsec, and more +- **Secure Isolation** — each workspace runs in its own Kubernetes namespace +- **Configurable Auth** — supports IRSA (AWS), Federated Identity (Azure), and WIF (GCP) + +## Variables + +| Variable | Description | Type | Default | +| ------------------------------------------------------------- | --------------------------------------------------------------- | ------ | ----------- | +| `host_cluster_name` | EKS cluster name where workspaces are deployed | string | — | +| `iac_tool` | Infrastructure-as-Code framework (`terraform`, `cdk`, `pulumi`) | string | `terraform` | +| `enable_aws` | Enable AWS authentication and tools | bool | `true` | +| `enable_azure` | Enable Azure authentication and tools | bool | `false` | +| `enable_gcp` | Enable GCP authentication and tools | bool | `false` | +| `aws_access_key_id` / `aws_secret_access_key` | AWS credentials (optional) | string | `""` | +| `azure_client_id` / `azure_client_secret` / `azure_tenant_id` | Azure credentials (optional) | string | `""` | +| `gcp_service_account` | GCP Service Account JSON (optional) | string | `""` | + +## Runtime Architecture + +| Layer | Platform | Purpose | +| ----------------------- | ------------------ | ------------------------------------------------------------ | +| **Infrastructure** | Amazon EKS | Where Coder deploys and runs the workspaces | +| **Workspace Container** | Ubuntu-based image | Developer environment (Terraform, CDK, Pulumi, CLIs) | +| **Cloud Access** | AWS / Azure / GCP | Target environments for deploying infrastructure or services | + +## Required Permissions and Setup Steps + +This template **runs on EKS** but allows developers inside the workspace to authenticate with **AWS, Azure, or GCP** using their own credentials or service identities. 
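+
+For example, once a workspace is running, a developer can source the bundled auth helpers (written to `~/workspace/cloud-auth.sh` by the startup script) and log in to whichever clouds are enabled — a sketch of the intended flow:
+
+```bash
+# Inside a workspace terminal
+source ~/workspace/cloud-auth.sh
+multicloud-login   # authenticates to AWS / Azure / GCP based on the ENABLE_* toggles
+multicloud-check   # prints caller identity / account info for each enabled cloud
+```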
+ +### Coder & Infrastructure (Admin Setup) + +Your Coder deployment must have: + +- Network access to an **existing EKS cluster** +- The Coder Helm chart installed and healthy +- Terraform configured with access to the EKS API + +#### Minimum AWS IAM Permissions + +For the identity running the template (Coder service account, Terraform runner, or user): + +```json +{ + "Effect": "Allow", + "Action": [ + "eks:DescribeCluster", + "eks:ListClusters", + "sts:GetCallerIdentity", + "sts:AssumeRole" + ], + "Resource": "*" +} +``` diff --git a/registry/nboyers/templates/cloud-dev/main.tf b/registry/nboyers/templates/cloud-dev/main.tf new file mode 100644 index 000000000..67b64cf09 --- /dev/null +++ b/registry/nboyers/templates/cloud-dev/main.tf @@ -0,0 +1,120 @@ +terraform { + required_providers { + coder = { + source = "coder/coder" + version = "~> 0.23" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.23" + } + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + } +} + +# --- Coder workspace context --- +data "coder_workspace" "me" {} +data "coder_workspace_owner" "me" {} + +# --- EKS connection --- +data "aws_eks_cluster" "eks" { + name = trimspace(var.host_cluster_name) +} + + +data "aws_eks_cluster_auth" "eks" { + name = trimspace(var.host_cluster_name) +} + +provider "kubernetes" { + host = data.aws_eks_cluster.eks.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks.certificate_authority[0].data) + token = data.aws_eks_cluster_auth.eks.token +} + +# --- Namespace per workspace --- +resource "kubernetes_namespace" "workspace" { + metadata { + name = "coder-${data.coder_workspace_owner.me.name}-${data.coder_workspace.me.name}" + labels = { + "coder.workspace" = data.coder_workspace.me.name + "coder.owner" = data.coder_workspace_owner.me.name + } + } +} + +# --- ServiceAccount (IRSA optional) --- +resource "kubernetes_service_account" "workspace" { + metadata { + name = "coder-${data.coder_workspace_owner.me.name}-${data.coder_workspace.me.name}" + namespace = kubernetes_namespace.workspace.metadata[0].name + + annotations = var.enable_aws && var.aws_role_arn != "" ? 
{ + "eks.amazonaws.com/role-arn" = var.aws_role_arn + } : {} + } +} + +# --- Coder Agent definition --- +resource "coder_agent" "main" { + os = "linux" + arch = "amd64" + + startup_script = file("${path.module}/scripts/setup-workspace.sh") + + env = { + # IaC tool & cloud toggles + IAC_TOOL = var.iac_tool + ENABLE_AWS = tostring(var.enable_aws) + ENABLE_AZURE = tostring(var.enable_azure) + ENABLE_GCP = tostring(var.enable_gcp) + + # Developer credentials + AWS_ACCESS_KEY_ID = var.aws_access_key_id + AWS_SECRET_ACCESS_KEY = var.aws_secret_access_key + AZURE_CLIENT_ID = var.azure_client_id + AZURE_TENANT_ID = var.azure_tenant_id + AZURE_CLIENT_SECRET = var.azure_client_secret + GCP_SERVICE_ACCOUNT = var.gcp_service_account + } +} + +# --- Kubernetes Pod (runs workspace container) --- +resource "kubernetes_pod" "workspace" { + count = data.coder_workspace.me.start_count + + metadata { + name = "coder-${data.coder_workspace_owner.me.name}-${data.coder_workspace.me.name}" + namespace = kubernetes_namespace.workspace.metadata[0].name + labels = { + "app" = "coder-workspace" + "coder.owner" = data.coder_workspace_owner.me.name + "coder.agent" = "true" + } + } + + spec { + service_account_name = kubernetes_service_account.workspace.metadata[0].name + + container { + name = "workspace" + image = "codercom/enterprise-base:ubuntu" + command = ["/bin/bash", "-c", coder_agent.main.init_script] + + env { + name = "CODER_AGENT_TOKEN" + value = coder_agent.main.token + } + + resources { + requests = { cpu = "500m", memory = "1Gi" } + limits = { cpu = "2", memory = "4Gi" } + } + } + } + + depends_on = [coder_agent.main] +} diff --git a/registry/nboyers/templates/cloud-dev/scripts/cloud-auth.sh b/registry/nboyers/templates/cloud-dev/scripts/cloud-auth.sh new file mode 100644 index 000000000..18550bb0e --- /dev/null +++ b/registry/nboyers/templates/cloud-dev/scripts/cloud-auth.sh @@ -0,0 +1,319 @@ +#!/usr/bin/env bash +# cloud-auth.sh — Multi-cloud auth helpers (source this file, don't execute) +# Supports: +# - AWS: access keys or IRSA (via pod SA) +# - Azure: federated token or client secret +# - GCP: service account JSON or Workload Identity Federation (KSA -> SA) + +set -euo pipefail + +# -------- util -------- +_has() { command -v "$1" > /dev/null 2>&1; } +_docker_ok() { _has docker && [[ -S /var/run/docker.sock ]]; } + +cloud-auth-help() { + cat << 'EOHELP' +Multi-Cloud Authentication Helper — source this file: + + source ~/workspace/cloud-auth.sh + +Environment variables (read if set): + + # Common toggles (optional) + ENABLE_AWS=true|false + ENABLE_AZURE=true|false + ENABLE_GCP=true|false + + # AWS + AWS_REGION=us-west-2 + AWS_ACCESS_KEY_ID=... + AWS_SECRET_ACCESS_KEY=... + AWS_SESSION_TOKEN=... # optional (STS); if unset, IRSA/IMDS is used + + # Azure + AZURE_CLIENT_ID=... + AZURE_TENANT_ID=... + AZURE_CLIENT_SECRET=... # OR: + AZURE_FEDERATED_TOKEN_FILE=/var/run/secrets/azure/tokens/azure-identity-token + + # GCP + GCP_PROJECT_ID=... + # Option A (Service Account JSON): + GCP_SERVICE_ACCOUNT='{ ... }' + # Option B (Workload Identity Federation): + GCP_WORKLOAD_IDENTITY_PROVIDER=projects/..../locations/global/workloadIdentityPools/.../providers/... 
+ # (uses KSA token at /var/run/secrets/kubernetes.io/serviceaccount/token) + +Functions: + + # AWS + aws-login # ensures creds (keys or IRSA), sets region config if provided + aws-check # prints caller identity + aws-ecr-login # docker login to ECR (if docker socket present) + + # Azure + azure-login # SP login via federated token OR client secret + azure-check # prints account info + azure-acr-login # docker login to ACR (requires AZURE_ACR_NAME) + + # GCP + gcp-login # SA JSON or WIF + gcp-check # prints active gcloud account & project + gcp-gar-login # docker auth to GAR (requires GCP_REGION & PROJECT) + + # Convenience + multicloud-login # calls the per-cloud logins if toggles are true + multicloud-check # calls the per-cloud checks +EOHELP +} + +# -------- AWS -------- +aws-login() { + [[ "${ENABLE_AWS:-true}" == "true" ]] || { + echo "AWS disabled" + return 0 + } + if ! _has aws; then + echo "aws CLI not found" + return 1 + fi + + # If access keys are present, write standard files; otherwise rely on IRSA/IMDS + if [[ -n "${AWS_ACCESS_KEY_ID:-}" ]]; then + mkdir -p "${HOME}/.aws" + { + echo "[default]" + echo "aws_access_key_id=${AWS_ACCESS_KEY_ID}" + echo "aws_secret_access_key=${AWS_SECRET_ACCESS_KEY:-}" + [[ -n "${AWS_SESSION_TOKEN:-}" ]] && echo "aws_session_token=${AWS_SESSION_TOKEN}" + } > "${HOME}/.aws/credentials" + if [[ -n "${AWS_REGION:-}" ]]; then + { + echo "[default]" + echo "region=${AWS_REGION}" + } > "${HOME}/.aws/config" + fi + fi + + # Validate + if ! aws sts get-caller-identity > /dev/null 2>&1; then + echo "❌ AWS auth failed (neither valid keys nor IRSA available)" + return 1 + fi + echo "✅ AWS auth OK" +} + +aws-check() { + _has aws || { + echo "aws CLI not found" + return 1 + } + aws sts get-caller-identity +} + +aws-ecr-login() { + _has aws || { + echo "aws CLI not found" + return 1 + } + _docker_ok || { + echo "ℹ️ docker socket not available; skipping ECR login" + return 0 + } + : "${AWS_REGION:=us-east-1}" + aws-login > /dev/null || return 1 + AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)" + aws ecr get-login-password --region "${AWS_REGION}" \ + | docker login --username AWS --password-stdin \ + "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com" + export ECR_REGISTRY="${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com" + echo "✅ ECR login OK → ${ECR_REGISTRY}" +} + +# -------- Azure -------- +azure-login() { + [[ "${ENABLE_AZURE:-false}" == "true" ]] || { + echo "Azure disabled" + return 0 + } + _has az || { + echo "az CLI not found" + return 1 + } + [[ -n "${AZURE_CLIENT_ID:-}" && -n "${AZURE_TENANT_ID:-}" ]] || { + echo "❌ Set AZURE_CLIENT_ID and AZURE_TENANT_ID" + return 1 + } + + if [[ -n "${AZURE_FEDERATED_TOKEN_FILE:-}" && -f "${AZURE_FEDERATED_TOKEN_FILE}" ]]; then + az login --service-principal \ + --username "${AZURE_CLIENT_ID}" \ + --tenant "${AZURE_TENANT_ID}" \ + --federated-token "$(cat "${AZURE_FEDERATED_TOKEN_FILE}")" \ + --allow-no-subscriptions + elif [[ -n "${AZURE_CLIENT_SECRET:-}" ]]; then + az login --service-principal \ + -u "${AZURE_CLIENT_ID}" -p "${AZURE_CLIENT_SECRET}" \ + --tenant "${AZURE_TENANT_ID}" + else + echo "❌ Provide AZURE_FEDERATED_TOKEN_FILE or AZURE_CLIENT_SECRET" + return 1 + fi + + echo "✅ Azure auth OK" +} + +azure-check() { + _has az || { + echo "az CLI not found" + return 1 + } + az account show +} + +azure-acr-login() { + _has az || { + echo "az CLI not found" + return 1 + } + _docker_ok || { + echo "ℹ️ docker socket not available; skipping ACR login" + return 0 + } + [[ -n 
"${AZURE_ACR_NAME:-}" ]] || { + echo "❌ Set AZURE_ACR_NAME" + return 1 + } + az account show > /dev/null 2>&1 || azure-login + az acr login --name "${AZURE_ACR_NAME}" + export ACR_REGISTRY="${AZURE_ACR_NAME}.azurecr.io" + echo "✅ ACR login OK → ${ACR_REGISTRY}" +} + +# -------- GCP -------- +gcp-login() { + [[ "${ENABLE_GCP:-false}" == "true" ]] || { + echo "GCP disabled" + return 0 + } + _has gcloud || { + echo "gcloud not found" + return 1 + } + + if [[ -n "${GCP_SERVICE_ACCOUNT:-}" ]]; then + # Service Account JSON path + echo "${GCP_SERVICE_ACCOUNT}" > /tmp/gcp.json || { + echo "❌ Failed to write GCP credentials" + return 1 + } + export GOOGLE_APPLICATION_CREDENTIALS=/tmp/gcp.json || { + echo "❌ Failed to set GCP credentials path" + return 1 + } + gcloud auth activate-service-account --key-file=/tmp/gcp.json --quiet || { + echo "❌ GCP service account auth failed" + return 1 + } + else + # Workload Identity Federation using KSA token + WIP provider + [[ -n "${GCP_WORKLOAD_IDENTITY_PROVIDER:-}" && -n "${GCP_PROJECT_ID:-}" ]] || { + echo "❌ Provide GCP_SERVICE_ACCOUNT JSON or set GCP_WORKLOAD_IDENTITY_PROVIDER & GCP_PROJECT_ID" + return 1 + } + [[ -f "/var/run/secrets/kubernetes.io/serviceaccount/token" ]] || { + echo "❌ KSA token not found" + return 1 + } + + TMP="/tmp/gcp-wif-$$.json" + cat > "${TMP}" << 'EOF' +{ + "type": "external_account", + "audience": "//iam.googleapis.com/${GCP_WORKLOAD_IDENTITY_PROVIDER}", + "subject_token_type": "urn:ietf:params:oauth:token-type:jwt", + "token_url": "https://sts.googleapis.com/v1/token", + "credential_source": { + "file": "/var/run/secrets/kubernetes.io/serviceaccount/token", + "format": { "type": "text" } + } +} +EOF + [[ $? -eq 0 ]] || { + echo "❌ Failed to write GCP WIF config" + return 1 + } + export GOOGLE_APPLICATION_CREDENTIALS="${TMP}" || { + echo "❌ Failed to set GCP credentials path" + return 1 + } + gcloud auth login --cred-file="${GOOGLE_APPLICATION_CREDENTIALS}" --quiet || { + echo "❌ GCP WIF auth failed" + return 1 + } + fi + + if [[ -n "${GCP_PROJECT_ID:-}" ]]; then + gcloud config set project "${GCP_PROJECT_ID}" --quiet + fi + echo "✅ GCP auth OK" +} + +gcp-check() { + _has gcloud || { + echo "gcloud not found" + return 1 + } + gcloud auth list + gcloud config get-value project || true +} + +gcp-gar-login() { + _docker_ok || { + echo "ℹ️ docker socket not available; skipping GAR login" + return 0 + } + : "${GCP_REGION:=us-central1}" + [[ -n "${GCP_PROJECT_ID:-}" ]] || { + echo "❌ Set GCP_PROJECT_ID" + return 1 + } + gcloud auth list --filter=status:ACTIVE --format="value(account)" > /dev/null || gcp-login + gcloud auth configure-docker "${GCP_REGION}-docker.pkg.dev" --quiet + export GAR_REGISTRY="${GCP_REGION}-docker.pkg.dev/${GCP_PROJECT_ID}" + echo "✅ GAR configured → ${GAR_REGISTRY}" +} + +# -------- Convenience -------- +multicloud-login() { + if [[ "${ENABLE_AWS:-true}" == "true" ]]; then + aws-login + fi + if [[ "${ENABLE_AZURE:-false}" == "true" ]]; then + azure-login + fi + if [[ "${ENABLE_GCP:-false}" == "true" ]]; then + gcp-login + fi + echo "✨ Multi-cloud login complete" +} + +multicloud-check() { + if [[ "${ENABLE_AWS:-true}" == "true" ]]; then + echo "AWS:" + aws-check + echo + fi + if [[ "${ENABLE_AZURE:-false}" == "true" ]]; then + echo "Azure:" + azure-check + echo + fi + if [[ "${ENABLE_GCP:-false}" == "true" ]]; then + echo "GCP:" + gcp-check + echo + fi +} + +echo "✨ cloud-auth loaded. Run 'cloud-auth-help' for usage." 
diff --git a/registry/nboyers/templates/cloud-dev/scripts/setup-workspace.sh b/registry/nboyers/templates/cloud-dev/scripts/setup-workspace.sh new file mode 100644 index 000000000..937cb9cc7 --- /dev/null +++ b/registry/nboyers/templates/cloud-dev/scripts/setup-workspace.sh @@ -0,0 +1,501 @@ +#!/usr/bin/env bash +set -euo pipefail + +# ========================= +# Helpers & safe defaults +# ========================= +log() { printf '%s %s\n' "👉" "$*"; } +ok() { printf '%s %s\n' "✅" "$*"; } +skip() { printf '%s %s\n' "⏭️" "$*"; } +warn() { printf '%s %s\n' "⚠️" "$*"; } + +# Detect CPU arch (amd64/arm64) +arch() { + case "$(uname -m)" in + x86_64 | amd64) echo amd64 ;; + aarch64 | arm64) echo arm64 ;; + *) echo amd64 ;; + esac +} + +# Map to Docker static tarball arch names +docker_tar_arch() { + case "$(arch)" in + amd64) echo x86_64 ;; + arm64) echo aarch64 ;; + *) echo x86_64 ;; + esac +} + +SAFE_TMP="$(mktemp -d)" +trap 'rm -rf "$SAFE_TMP"' EXIT + +safe_dl() { # url dest + curl -fL --retry 5 --retry-delay 2 --connect-timeout 10 -o "$2" "$1" || { + echo "Failed to download $1" + return 1 + } +} + +docker_ok() { + command -v docker > /dev/null 2>&1 && [[ -S /var/run/docker.sock ]] +} + +# Ensure user bin dir +mkdir -p "$HOME/.local/bin" "$HOME/workspace/app" +export PATH="$HOME/.local/bin:$PATH" + +# Inputs (with sane defaults) +IAC_TOOL="${IAC_TOOL:-terraform}" +TERRAFORM_VERSION="${TERRAFORM_VERSION:-1.6.0}" + +ENABLE_AWS="${ENABLE_AWS:-true}" +ENABLE_AZURE="${ENABLE_AZURE:-false}" +ENABLE_GCP="${ENABLE_GCP:-false}" + +AWS_REGION="${AWS_REGION:-}" +AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID:-}" +AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY:-}" +AWS_SESSION_TOKEN="${AWS_SESSION_TOKEN:-}" + +AZURE_CLIENT_ID="${AZURE_CLIENT_ID:-}" +AZURE_TENANT_ID="${AZURE_TENANT_ID:-}" +AZURE_CLIENT_SECRET="${AZURE_CLIENT_SECRET:-}" +AZURE_FEDERATED_TOKEN_FILE="${AZURE_FEDERATED_TOKEN_FILE:-}" + +GCP_PROJECT_ID="${GCP_PROJECT_ID:-}" +GCP_SERVICE_ACCOUNT="${GCP_SERVICE_ACCOUNT:-}" # full JSON if not using WIF + +REPO_URL="${REPO_URL:-${repo_url:-}}" +DEFAULT_BRANCH="${DEFAULT_BRANCH:-${default_branch:-main}}" +WORKDIR="${WORKDIR:-$HOME/workspace/app}" +GITHUB_TOKEN="${GITHUB_TOKEN:-${GIT_TOKEN:-}}" + +GIT_AUTHOR_NAME="${GIT_AUTHOR_NAME:-}" +GIT_AUTHOR_EMAIL="${GIT_AUTHOR_EMAIL:-}" + +echo "╔════════════════════════════════════════════════════════════════╗" +echo "║ Multi-Cloud DevOps Workspace Setup (no sudo) ║" +echo "╚════════════════════════════════════════════════════════════════╝" +echo + +# ========================================================== +# Write multi-cloud helper functions to ~/workspace/cloud-auth.sh +# ========================================================== +cat > "${HOME}/workspace/cloud-auth.sh" << 'EOAUTHSCRIPT' +#!/usr/bin/env bash +set -euo pipefail + +aws-ecr-login() { + : "${AWS_REGION:=us-east-1}" + if ! command -v aws >/dev/null 2>&1; then echo "aws CLI not found"; return 1; fi + if ! 
aws sts get-caller-identity &>/dev/null; then + echo "❌ AWS creds not available (IRSA or keys)"; return 1; fi + AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)" + if command -v docker >/dev/null 2>&1 && [[ -S /var/run/docker.sock ]]; then + aws ecr get-login-password --region "${AWS_REGION}" | \ + docker login --username AWS --password-stdin \ + "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com" + export ECR_REGISTRY="${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com" + echo "✅ ECR login OK → ${ECR_REGISTRY}" + else + echo "ℹ️ docker socket not available; skipping docker login" + fi +} + +aws-check() { aws sts get-caller-identity && echo "✓ AWS creds valid"; } + +azure-login() { + if ! command -v az >/dev/null 2>&1; then echo "az CLI not found"; return 1; fi + if [[ -n "${AZURE_FEDERATED_TOKEN_FILE:-}" && -f "${AZURE_FEDERATED_TOKEN_FILE}" ]]; then + az login --service-principal --username "${AZURE_CLIENT_ID}" \ + --tenant "${AZURE_TENANT_ID}" \ + --federated-token "$(cat "${AZURE_FEDERATED_TOKEN_FILE}")" \ + --allow-no-subscriptions + elif [[ -n "${AZURE_CLIENT_SECRET:-}" ]]; then + az login --service-principal -u "${AZURE_CLIENT_ID}" -p "${AZURE_CLIENT_SECRET}" --tenant "${AZURE_TENANT_ID}" + else + echo "❌ Provide AZURE_FEDERATED_TOKEN_FILE or AZURE_CLIENT_SECRET"; return 1 + fi + echo "✅ Azure auth OK"; az account show +} + +azure-acr-login() { + [[ -n "${AZURE_ACR_NAME:-}" ]] || { echo "Set AZURE_ACR_NAME"; return 1; } + az account show &>/dev/null || azure-login + if command -v docker >/dev/null 2>&1 && [[ -S /var/run/docker.sock ]]; then + az acr login --name "${AZURE_ACR_NAME}" + export ACR_REGISTRY="${AZURE_ACR_NAME}.azurecr.io" + echo "✅ ACR login OK → ${ACR_REGISTRY}" + else + echo "ℹ️ docker socket not available; skipping docker login" + fi +} + +azure-check() { az account show && echo "✓ Azure creds valid" || { echo "❌ Not logged in"; return 1; }; } + +gcp-login() { + if ! 
command -v gcloud >/dev/null 2>&1; then echo "gcloud not found"; return 1; fi + if [[ -n "${GCP_SERVICE_ACCOUNT:-}" ]]; then + # SA JSON auth + echo "${GCP_SERVICE_ACCOUNT}" > /tmp/gcp.json || { echo "❌ Failed to write GCP credentials"; return 1; } + export GOOGLE_APPLICATION_CREDENTIALS=/tmp/gcp.json + gcloud auth activate-service-account --key-file=/tmp/gcp.json --quiet || { echo "❌ GCP auth failed"; return 1; } + else + echo "❌ Provide GCP_SERVICE_ACCOUNT JSON (WIF path not configured here)"; return 1 + fi + [[ -n "${GCP_PROJECT_ID:-}" ]] && gcloud config set project "${GCP_PROJECT_ID}" --quiet || true + echo "✅ GCP auth OK"; gcloud auth list +} + +gcp-gar-login() { + : "${GCP_REGION:=us-central1}" + [[ -n "${GCP_PROJECT_ID:-}" ]] || { echo "Set GCP_PROJECT_ID"; return 1; } + gcloud auth list --filter=status:ACTIVE --format="value(account)" &>/dev/null || gcp-login + if command -v docker >/dev/null 2>&1 && [[ -S /var/run/docker.sock ]]; then + gcloud auth configure-docker "${GCP_REGION}-docker.pkg.dev" --quiet + export GAR_REGISTRY="${GCP_REGION}-docker.pkg.dev/${GCP_PROJECT_ID}" + echo "✅ GAR configured → ${GAR_REGISTRY}" + else + echo "ℹ️ docker socket not available; skipping docker login" + fi +} + +gcp-check() { gcloud auth list --filter=status:ACTIVE --format="value(account)" >/dev/null && echo "✓ GCP creds valid" || { echo "❌ Not logged in"; return 1; }; } + +multicloud-login() { + [[ "${ENABLE_AWS:-false}" == "true" ]] && command -v aws >/dev/null && aws-ecr-login || true + [[ "${ENABLE_AZURE:-false}" == "true" ]] && command -v az >/dev/null && azure-login || true + [[ "${ENABLE_GCP:-false}" == "true" ]] && command -v gcloud >/dev/null && gcp-login || true + echo "✨ Multi-cloud login complete" +} + +multicloud-check() { + [[ "${ENABLE_AWS:-false}" == "true" ]] && command -v aws >/dev/null && { echo "AWS:"; aws-check; echo; } || true + [[ "${ENABLE_AZURE:-false}" == "true" ]] && command -v az >/dev/null && { echo "Azure:"; azure-check; echo; } || true + [[ "${ENABLE_GCP:-false}" == "true" ]] && command -v gcloud >/dev/null && { echo "GCP:"; gcp-check; echo; } || true +} + +cloud-auth-help() { + cat <<'EOHELP' +Multi-Cloud Authentication Helper + +Functions: + AWS: aws-ecr-login, aws-check + Azure: azure-login, azure-acr-login, azure-check + GCP: gcp-login, gcp-gar-login, gcp-check + Multi: multicloud-login, multicloud-check, cloud-auth-help +EOHELP + return 0 +} + +echo "✨ Multi-cloud auth helpers loaded. Run 'cloud-auth-help' for help." +EOAUTHSCRIPT +chmod +x "${HOME}/workspace/cloud-auth.sh" +ok "Created ${HOME}/workspace/cloud-auth.sh" +echo + +# ========================= +# IaC tooling +# ========================= +log "Installing IaC tooling (${IAC_TOOL})" +case "$IAC_TOOL" in + terraform) + if ! command -v terraform > /dev/null 2>&1; then + safe_dl "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_$(arch).zip" "$SAFE_TMP/tf.zip" + unzip -q "$SAFE_TMP/tf.zip" -d "$HOME/.local/bin" + ok "Terraform ${TERRAFORM_VERSION} installed" + else + ok "Terraform already installed ($(terraform version | head -1))" + fi + ;; + cdk) + if ! command -v npm > /dev/null 2>&1; then + log "npm not found; installing Node via nvm" + export NVM_DIR="$HOME/.nvm" + curl -fsSL https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash + # shellcheck disable=SC1090 + . 
"$NVM_DIR/nvm.sh" + nvm install --lts + nvm use --lts + # persist for future shells + grep -q 'NVM_DIR' "$HOME/.bashrc" 2> /dev/null || { + echo 'export NVM_DIR="$HOME/.nvm"' >> "$HOME/.bashrc" + echo '. "$NVM_DIR/nvm.sh"' >> "$HOME/.bashrc" + } + fi + npm install -g aws-cdk > /dev/null + ok "AWS CDK installed ($(cdk --version))" + ;; + pulumi) + if ! command -v pulumi > /dev/null 2>&1; then + curl -fsSL https://get.pulumi.com | sh + export PATH="$PATH:$HOME/.pulumi/bin" + ok "Pulumi installed ($(pulumi version))" + else + ok "Pulumi already installed ($(pulumi version))" + fi + ;; + *) + warn "Unknown IAC_TOOL=${IAC_TOOL}; skipping IaC install" + ;; +esac + +# Extras: Terragrunt, tflint, tfsec, terraform-docs, pre-commit +if ! command -v terragrunt > /dev/null 2>&1; then + TG_VER="0.54.0" + safe_dl "https://github.com/gruntwork-io/terragrunt/releases/download/v${TG_VER}/terragrunt_linux_$(arch)" "$HOME/.local/bin/terragrunt" + chmod +x "$HOME/.local/bin/terragrunt" + ok "Terragrunt v${TG_VER} installed" +fi + +if ! command -v tflint > /dev/null 2>&1; then + # official installer handles arch + curl -fsSL https://raw.githubusercontent.com/terraform-linters/tflint/master/install_linux.sh | bash + mv -f /tmp/tflint "$HOME/.local/bin/" 2> /dev/null || true + ok "tflint installed" +fi + +if ! command -v tfsec > /dev/null 2>&1; then + TFSEC_VER="1.28.1" + safe_dl "https://github.com/aquasecurity/tfsec/releases/download/v${TFSEC_VER}/tfsec-linux-$(arch)" "$HOME/.local/bin/tfsec" + chmod +x "$HOME/.local/bin/tfsec" + ok "tfsec v${TFSEC_VER} installed" +fi + +if ! command -v terraform-docs > /dev/null 2>&1; then + TFD_VER="0.17.0" + safe_dl "https://github.com/terraform-docs/terraform-docs/releases/download/v${TFD_VER}/terraform-docs-v${TFD_VER}-linux-$(arch).tar.gz" "$SAFE_TMP/terraform-docs.tgz" + tar -xzf "$SAFE_TMP/terraform-docs.tgz" -C "$SAFE_TMP" + install -m 0755 "$SAFE_TMP/terraform-docs" "$HOME/.local/bin/terraform-docs" + ok "terraform-docs v${TFD_VER} installed" +fi + +if ! command -v pre-commit > /dev/null 2>&1; then + if command -v pip3 > /dev/null 2>&1; then + pip3 install --user --quiet pre-commit + ok "pre-commit installed" + elif command -v python3 > /dev/null 2>&1; then + python3 -m pip install --user --quiet pre-commit + ok "pre-commit installed" + else + warn "Python3/pip3 not found; skipping pre-commit" + fi +fi + +# ========================= +# Cloud CLIs (user-space) +# ========================= +echo +log "Installing Cloud CLIs (user-space)" + +# AWS CLI v2 +if [[ "${ENABLE_AWS}" == "true" ]] && ! command -v aws > /dev/null 2>&1; then + safe_dl "https://awscli.amazonaws.com/awscli-exe-linux-$(arch).zip" "$SAFE_TMP/awscliv2.zip" \ + || safe_dl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" "$SAFE_TMP/awscliv2.zip" + unzip -q "$SAFE_TMP/awscliv2.zip" -d "$SAFE_TMP" + "$SAFE_TMP/aws/install" -i "$HOME/.local/aws-cli" -b "$HOME/.local/bin" > /dev/null + ok "AWS CLI installed" +fi + +# Azure CLI +if [[ "${ENABLE_AZURE}" == "true" ]] && ! command -v az > /dev/null 2>&1; then + if command -v pip3 > /dev/null 2>&1; then + pip3 install --user --quiet azure-cli && ok "Azure CLI installed" + elif command -v python3 > /dev/null 2>&1; then + python3 -m pip install --user --quiet azure-cli && ok "Azure CLI installed" + else + warn "Python/pip not found; cannot install Azure CLI" + fi +fi + +# Google Cloud SDK +if [[ "${ENABLE_GCP}" == "true" ]] && ! 
command -v gcloud > /dev/null 2>&1; then + GSDK_ARCH="$([[ "$(arch)" == amd64 ]] && echo x86_64 || echo arm)" + safe_dl "https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-linux-${GSDK_ARCH}.tar.gz" "$SAFE_TMP/gcloud.tgz" + tar -xzf "$SAFE_TMP/gcloud.tgz" -C "$HOME" + mv "$HOME/google-cloud-sdk" "$HOME/.local/google-cloud-sdk" + ln -sf "$HOME/.local/google-cloud-sdk/bin/"{gcloud,gsutil,bq} "$HOME/.local/bin/" || true + "$HOME/.local/google-cloud-sdk/install.sh" --quiet --rc-path /dev/null --path-update=false || true + ok "Google Cloud SDK installed" +fi + +# ========================= +# Container & K8s tools +# ========================= +echo +log "Installing container & Kubernetes tools" + +# Docker CLI (client only) +if ! command -v docker > /dev/null 2>&1; then + DOCKER_VER="25.0.5" + safe_dl "https://download.docker.com/linux/static/stable/$(docker_tar_arch)/docker-${DOCKER_VER}.tgz" "$SAFE_TMP/docker.tgz" + tar -xzf "$SAFE_TMP/docker.tgz" -C "$SAFE_TMP" + install -m 0755 "$SAFE_TMP/docker/docker" "$HOME/.local/bin/docker" + ok "Docker client installed" +fi + +# kubectl +if ! command -v kubectl > /dev/null 2>&1; then + KREL="$(curl -fsSL https://dl.k8s.io/release/stable.txt)" + safe_dl "https://dl.k8s.io/release/${KREL}/bin/linux/$(arch)/kubectl" "$SAFE_TMP/kubectl" + install -m 0755 "$SAFE_TMP/kubectl" "$HOME/.local/bin/kubectl" + ok "kubectl ${KREL} installed" +fi + +# Helm +if ! command -v helm > /dev/null 2>&1; then + curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | USE_SUDO=false HELM_INSTALL_DIR="$HOME/.local/bin" bash + ok "Helm installed" +fi + +# jq / yq +if ! command -v jq > /dev/null 2>&1; then + safe_dl "https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux-$(arch)" "$HOME/.local/bin/jq" + chmod +x "$HOME/.local/bin/jq" + ok "jq installed" +fi + +if ! 
command -v yq > /dev/null 2>&1; then + safe_dl "https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(arch)" "$HOME/.local/bin/yq" + chmod +x "$HOME/.local/bin/yq" + ok "yq installed" +fi + +# ========================= +# Cloud runtime auth (optional) +# ========================= +echo +log "Configuring runtime cloud auth (if provided)" + +# AWS keys (override IRSA if present) +if [[ "${ENABLE_AWS}" == "true" ]] && [[ -n "$AWS_ACCESS_KEY_ID" ]]; then + mkdir -p "$HOME/.aws" + { + echo "[default]" + echo "aws_access_key_id=${AWS_ACCESS_KEY_ID}" + echo "aws_secret_access_key=${AWS_SECRET_ACCESS_KEY:-}" + [[ -n "$AWS_SESSION_TOKEN" ]] && echo "aws_session_token=${AWS_SESSION_TOKEN}" + } > "$HOME/.aws/credentials" || { warn "Failed to write AWS credentials"; } + if [[ -n "$AWS_REGION" ]]; then + { + echo "[default]" + echo "region=${AWS_REGION}" + } > "$HOME/.aws/config" + fi + ok "AWS runtime creds configured${AWS_REGION:+ (region ${AWS_REGION})}" +else + skip "AWS runtime creds not set" +fi + +# Azure SP (client secret path; federated handled by helper) +if [[ "${ENABLE_AZURE}" == "true" ]] && [[ -n "$AZURE_CLIENT_ID" && -n "$AZURE_TENANT_ID" ]]; then + if command -v az > /dev/null 2>&1; then + if [[ -n "$AZURE_FEDERATED_TOKEN_FILE" && -f "$AZURE_FEDERATED_TOKEN_FILE" ]]; then + az login --service-principal --username "$AZURE_CLIENT_ID" \ + --tenant "$AZURE_TENANT_ID" \ + --federated-token "$(cat "$AZURE_FEDERATED_TOKEN_FILE")" \ + --allow-no-subscriptions > /dev/null + ok "Azure federated login complete" + elif [[ -n "$AZURE_CLIENT_SECRET" ]]; then + az login --service-principal -u "$AZURE_CLIENT_ID" -p "$AZURE_CLIENT_SECRET" --tenant "$AZURE_TENANT_ID" > /dev/null + ok "Azure SP login complete" + else + skip "Azure creds not provided (need federated token file or client secret)" + fi + else + warn "Azure CLI not found; skipping login" + fi +else + skip "Azure runtime auth not configured" +fi + +# GCP SA JSON +if [[ "${ENABLE_GCP}" == "true" ]] && [[ -n "$GCP_SERVICE_ACCOUNT" ]]; then + if command -v gcloud > /dev/null 2>&1; then + echo "$GCP_SERVICE_ACCOUNT" > /tmp/gcp.json || { warn "Failed to write GCP credentials"; } + export GOOGLE_APPLICATION_CREDENTIALS=/tmp/gcp.json + gcloud auth activate-service-account --key-file=/tmp/gcp.json > /dev/null || { warn "GCP auth failed"; } + [[ -n "$GCP_PROJECT_ID" ]] && gcloud config set project "$GCP_PROJECT_ID" --quiet || true + ok "GCP SA auth complete" + else + warn "gcloud not found; skipping GCP auth" + fi +else + skip "GCP runtime auth not configured" +fi + +# ========================= +# Git identity & bootstrap +# ========================= +echo +log "Preparing workspace directory" + +# Git identity +if [[ -n "$GIT_AUTHOR_NAME" ]]; then + git config --global user.name "$GIT_AUTHOR_NAME" +fi +if [[ -n "$GIT_AUTHOR_EMAIL" ]]; then + git config --global user.email "$GIT_AUTHOR_EMAIL" +fi + +mkdir -p "$WORKDIR" + +# Clone or init +if [[ -n "$REPO_URL" ]]; then + URL="$REPO_URL" + if [[ -n "$GITHUB_TOKEN" && "$URL" =~ ^https://github.com/ ]]; then + URL="${URL/https:\/\//https:\/\/${GITHUB_TOKEN}@}" || { warn "Failed to modify URL"; } + warn "Using GITHUB_TOKEN for private repo clone" + fi + if [[ ! 
-d "$WORKDIR/.git" ]]; then + log "Cloning ${REPO_URL} into ${WORKDIR}" + git clone "$URL" "$WORKDIR" || { warn "Failed to clone repository"; } + pushd "$WORKDIR" > /dev/null + git checkout "$DEFAULT_BRANCH" || git checkout -b "$DEFAULT_BRANCH" + popd > /dev/null + ok "Repository ready @ ${DEFAULT_BRANCH}" + else + ok "Repo already present at ${WORKDIR}" + fi +else + if [[ ! -d "$WORKDIR/.git" ]]; then + log "Initializing empty repository in ${WORKDIR}" + git init -q "$WORKDIR" + pushd "$WORKDIR" > /dev/null + git checkout -b "$DEFAULT_BRANCH" > /dev/null 2>&1 || true + popd > /dev/null + fi + ok "Workspace ready at ${WORKDIR}" +fi + +# ========================= +# Company Terraform skeleton +# ========================= +echo +log "Creating company Terraform skeleton (optional)" +mkdir -p "$WORKDIR/terraform"/{environments/{dev,staging,prod},modules,policies,shared} +cat > "$WORKDIR/terraform/README.md" << 'EOREADME' +# Company Terraform Project +- `environments/` contains per-env stacks. +- `modules/` reusable infra modules. +- `policies/` sentinel/policy-as-code. +- `shared/` backend, providers, etc. +EOREADME +ok "Skeleton present at $WORKDIR/terraform" + +# ========================= +# PATH persistence tip +# ========================= +if ! grep -q 'export PATH="$HOME/.local/bin:$PATH"' "$HOME/.bashrc" 2> /dev/null; then + echo "export PATH=\"\$HOME/.local/bin:\$PATH\"" >> "$HOME/.bashrc" +fi + +echo +ok "Workspace ready!" +echo " • IaC tool: ${IAC_TOOL}" +echo " • AWS enabled: ${ENABLE_AWS}" +echo " • Azure enabled: ${ENABLE_AZURE}" +echo " • GCP enabled: ${ENABLE_GCP}" +[[ -d "$WORKDIR/.git" ]] && echo " • Repo: ${REPO_URL:-} @ ${DEFAULT_BRANCH}" +echo " • Auth helpers: source ~/workspace/cloud-auth.sh" diff --git a/registry/nboyers/templates/cloud-dev/test/basic.tftest.hcl b/registry/nboyers/templates/cloud-dev/test/basic.tftest.hcl new file mode 100644 index 000000000..9009bbf48 --- /dev/null +++ b/registry/nboyers/templates/cloud-dev/test/basic.tftest.hcl @@ -0,0 +1,87 @@ +# Run 'terraform test' from this template directory (where main.tf lives) + +# --- Mock cloud providers so no external calls happen --- +mock_provider "aws" {} +mock_provider "kubernetes" {} + +# Provide fake values for data sources your template reads +override_data { + target = data.aws_eks_cluster.eks + values = { + name = "unit-test-eks" + endpoint = "https://example.eks.local" + certificate_authority = [{ + data = base64encode("dummy-ca") + }] + } +} + +override_data { + target = data.aws_eks_cluster_auth.eks + values = { + token = "dummy-token" + } +} + +# --------------------------- +# 1) Validate configuration +# --------------------------- +run "validate" { + command = validate +} + +# --------------------------- +# 2) Plan with representative inputs +# --------------------------- +run "plan_with_defaults" { + command = plan + + variables { + host_cluster_name = "unit-test-eks" + + # IaC/tooling toggles + iac_tool = "terraform" + enable_aws = true + enable_azure = false + enable_gcp = false + + # Dev creds (empty OK for unit test) + aws_access_key_id = "" + aws_secret_access_key = "" + azure_client_id = "" + azure_tenant_id = "" + azure_client_secret = "" + gcp_service_account = "" + } + + # Simple sanity assertions (adjust resource addresses to your template) + assert { + condition = can(resource.kubernetes_namespace.workspace) + error_message = "kubernetes_namespace.workspace was not created in plan." 
+ } + + assert { + condition = can(resource.coder_agent.main) + error_message = "coder_agent.main was not planned." + } +} + +# --------------------------- +# 3) Plan with CDK selected +# --------------------------- +run "plan_with_cdk" { + command = plan + variables { + host_cluster_name = "unit-test-eks" + iac_tool = "cdk" + enable_aws = true + enable_azure = false + enable_gcp = false + } + + # Ensure the env reflects choice (string map lookup) + assert { + condition = contains(keys(resource.coder_agent.main.env), "IAC_TOOL") + error_message = "IAC_TOOL env not present on coder_agent.main." + } +} diff --git a/registry/nboyers/templates/cloud-dev/variables.tf b/registry/nboyers/templates/cloud-dev/variables.tf new file mode 100644 index 000000000..5fce229bf --- /dev/null +++ b/registry/nboyers/templates/cloud-dev/variables.tf @@ -0,0 +1,120 @@ +# --- Host cluster (where the workspace runs) --- +variable "host_cluster_name" { + description = "EKS cluster name" + type = string + + validation { + condition = can(regex("^[0-9A-Za-z][0-9A-Za-z_-]*$", trimspace(var.host_cluster_name))) + error_message = "Cluster name must match ^[0-9A-Za-z][0-9A-Za-z_-]*$ (no leading space)." + } +} + + +# --- Admin: IaC tool & toggles --- +variable "iac_tool" { + description = "Infrastructure as Code tool" + type = string + default = "terraform" + validation { + condition = contains(["terraform", "cdk", "pulumi"], var.iac_tool) + error_message = "Must be one of: terraform, cdk, pulumi" + } +} + + +variable "enable_aws" { + type = bool + default = true +} + +variable "enable_azure" { + type = bool + default = false +} + +variable "enable_gcp" { + type = bool + default = false +} + +# --- AWS --- +variable "aws_region" { + type = string + default = "us-west-2" +} + +variable "aws_role_arn" { + type = string + default = "" # IRSA optional +} + +variable "aws_access_key_id" { + type = string + default = "" + sensitive = true +} + +variable "aws_secret_access_key" { + type = string + default = "" + sensitive = true +} + +variable "aws_session_token" { + description = "Optional STS session token" + type = string + default = "" + sensitive = true +} + +variable "repo_url" { + description = "Git repository to clone into the workspace (optional)" + type = string + default = "" +} + +variable "default_branch" { + description = "Default branch name to use (if repo is empty or for initial checkout)" + type = string + default = "main" +} + + +# --- Azure --- +variable "azure_subscription_id" { + type = string + default = "" +} + +variable "azure_tenant_id" { + type = string + default = "" + sensitive = true +} + +variable "azure_client_id" { + type = string + default = "" + sensitive = true +} + +variable "azure_client_secret" { + type = string + default = "" + sensitive = true +} + +# --- GCP --- +variable "gcp_project_id" { + type = string + default = "" +} + +variable "gcp_service_account" { + description = "Service Account JSON (paste full JSON) — leave empty if using WIF" + type = string + default = "" + sensitive = true +} + +