From 8ec63f7c0ee8a1048aa4d73cb910ed20fdebd447 Mon Sep 17 00:00:00 2001 From: Rayhan Hossain Date: Wed, 11 Feb 2026 11:02:26 -0800 Subject: [PATCH 1/4] Initial plan for azure arc integration Signed-off-by: Rayhan Hossain --- .../azure-arc/azure-arc-integration-plan.md | 810 ++++++++++++++++++ operator/arc-extension/README.md | 340 ++++++++ operator/arc-extension/extension.yaml | 139 +++ operator/arc-extension/test-arc-extension.sh | 325 +++++++ operator/arc-extension/values-arc.yaml | 112 +++ 5 files changed, 1726 insertions(+) create mode 100644 docs/designs/azure-arc/azure-arc-integration-plan.md create mode 100644 operator/arc-extension/README.md create mode 100644 operator/arc-extension/extension.yaml create mode 100755 operator/arc-extension/test-arc-extension.sh create mode 100644 operator/arc-extension/values-arc.yaml diff --git a/docs/designs/azure-arc/azure-arc-integration-plan.md b/docs/designs/azure-arc/azure-arc-integration-plan.md new file mode 100644 index 00000000..1cce5189 --- /dev/null +++ b/docs/designs/azure-arc/azure-arc-integration-plan.md @@ -0,0 +1,810 @@ +# Azure Arc Integration Plan for DocumentDB Kubernetes Operator + +## Overview + +This document outlines a two-phase plan to integrate DocumentDB Kubernetes Operator with Azure extensions for deployment tracking and billing across **all** Kubernetes environments. + +**Goal:** Enable customers to install DocumentDB on any Kubernetes cluster (AKS, EKS, GKE, on-premises) while providing visibility in Azure Portal and usage-based billing. + +**Key Insight:** Azure knows a cluster exists, but NOT what's installed on it. Extensions solve this tracking gap. + +### Cluster Type Support + +| Cluster Type | Extension Cluster Type | Arc Agent Required? 
| +|--------------|----------------------|--------------------| +| **AKS** (Azure) | `managedClusters` | ❌ No - AKS native | +| **EKS** (AWS) | `connectedClusters` | ✅ Yes | +| **GKE** (GCP) | `connectedClusters` | ✅ Yes | +| **On-premises** | `connectedClusters` | ✅ Yes | + +**Same extension type (`Microsoft.DocumentDB.Operator`) works for all cluster types.** + +--- + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Azure Resource Manager │ +│ ┌───────────────────────────────────────────────────────────┐ │ +│ │ Extension Inventory (tracks ALL installations) │ │ +│ │ └── Microsoft.DocumentDB.Operator instances │ │ +│ │ ├── AKS clusters (managedClusters) │ │ +│ │ └── Arc clusters (connectedClusters) │ │ +│ ├── Billing Service (Phase 2) │ │ +│ └── Azure Portal visibility │ │ +│ └───────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────┘ + ▲ ▲ + │ │ + ┌──────────┴──────────┐ ┌──────────┴──────────┐ + │ AKS │ │ Arc-enabled │ + │ (managedClusters) │ │ (connectedClusters)│ + │ │ │ │ + │ No Arc agent │ │ Arc agent required │ + │ Extension: ✅ │ │ Extension: ✅ │ + │ │ │ (EKS, GKE, on-prem)│ + └─────────────────────┘ └─────────────────────┘ +``` + +--- + +## Deployment Options + +Two approaches are available for deploying DocumentDB via Azure Arc: + +### Option A: Azure Extension (Full Registration) + +Deploy as an official Azure extension type. 
**Works for both AKS and Arc-enabled clusters.** + +```bash +# For AKS clusters (no Arc agent needed) +az k8s-extension create \ + --name documentdb-operator \ + --extension-type Microsoft.DocumentDB.Operator \ + --cluster-name my-aks-cluster \ + --resource-group my-rg \ + --cluster-type managedClusters # <-- AKS native + +# For Arc-enabled clusters (EKS, GKE, on-prem) +az k8s-extension create \ + --name documentdb-operator \ + --extension-type Microsoft.DocumentDB.Operator \ + --cluster-name my-arc-cluster \ + --resource-group my-rg \ + --cluster-type connectedClusters # <-- Arc-enabled +``` + +> **Note:** Same extension type, same billing, unified tracking across all cluster types. + +### Option B: Flux GitOps (No Registration) + +Deploy via Flux GitOps configuration. **No Azure extension registration required. Works for both AKS and Arc-enabled clusters.** + +```bash +# For AKS clusters +az k8s-configuration flux create \ + --name documentdb-operator \ + --cluster-name my-aks-cluster \ + --resource-group my-rg \ + --cluster-type managedClusters \ + --namespace documentdb-operator \ + --scope cluster \ + --source-kind HelmRepository \ + --helm-repo-url "oci://ghcr.io/documentdb" \ + --helm-chart documentdb-operator \ + --helm-chart-version 0.1.3 + +# For Arc-enabled clusters (EKS, GKE, on-prem) +az k8s-configuration flux create \ + --name documentdb-operator \ + --cluster-name my-arc-cluster \ + --resource-group my-rg \ + --cluster-type connectedClusters \ + --namespace documentdb-operator \ + --scope cluster \ + --source-kind HelmRepository \ + --helm-repo-url "oci://ghcr.io/documentdb" \ + --helm-chart documentdb-operator \ + --helm-chart-version 0.1.3 +``` + +> **Note:** Flux is an open-source CNCF GitOps project (not Microsoft-owned). Azure integrates Flux natively for GitOps deployments. 
+ +### Comparison + +| Feature | Option A: Azure Extension | Option B: Flux GitOps | +|---------|--------------------------|----------------------| +| **AKS support** | ✅ Yes (managedClusters) | ✅ Yes (managedClusters) | +| **Arc cluster support** | ✅ Yes (connectedClusters) | ✅ Yes (connectedClusters) | +| **Azure registration required** | ✅ Yes (approval process) | ❌ No | +| **Time to deploy** | 3-4 weeks (registration wait) | Same day | +| **Cluster visible in Portal** | ✅ Yes | ✅ Yes | +| **Deploy via Azure CLI** | ✅ Yes | ✅ Yes | +| **Shows as "Extension" in Portal** | ✅ Yes | ❌ Shows as Flux config | +| **Azure Marketplace listing** | ✅ Yes | ❌ No | +| **Built-in Arc metering** | ✅ Yes | ❌ No (custom required) | +| **Health monitoring in Portal** | ✅ Automatic | ❌ Manual setup | +| **Upgrade via Azure CLI** | ✅ `az k8s-extension update` | ✅ `az k8s-configuration flux update` | +| **Enterprise support** | ✅ Microsoft support | ⚠️ Community + custom | + +### Pros & Cons + +#### Option A: Arc Extension + +**Pros:** +- Official Azure Marketplace presence +- Built-in health monitoring and status reporting +- Native Arc metering for billing (Phase 2) +- Enterprise support from Microsoft +- Consistent experience with other Arc extensions + +**Cons:** +- Requires Azure extension type registration (approval process) +- 2-4 week wait for registration approval +- More complex initial setup + +#### Option B: Flux GitOps + +**Pros:** +- No Azure registration/approval required +- Can deploy immediately +- Uses open-source CNCF standard (Flux) +- Full GitOps workflow with version control +- Works with any Git provider (GitHub, GitLab, Azure Repos) + +**Cons:** +- No Azure Marketplace listing +- Must implement custom metering for billing +- Shows as "Flux configuration" not "Extension" in Portal +- Less integrated Azure experience + +### Recommendation + +| Scenario | Recommended Option | +|----------|-------------------| +| Need to deploy immediately | **Option B: Flux 
GitOps** | +| Want Azure Marketplace presence | **Option A: Arc Extension** | +| Require built-in Arc billing | **Option A: Arc Extension** | +| Already using GitOps workflow | **Option B: Flux GitOps** | +| Enterprise customers expecting official extension | **Option A: Arc Extension** | + +--- + +## Phase 1: Arc Extension + ARM Visibility + +**Duration:** 3-4 weeks +**Goal:** Install DocumentDB via Arc, view in Azure Portal + +> **Note:** This phase covers Option A (Arc Extension). For Option B (Flux GitOps), skip to the [Flux GitOps Setup](#flux-gitops-setup-option-b) section. + +### What Gets Deployed + +| Component | Deployed By | Location | +|-----------|-------------|----------| +| Azure Arc Agent | Customer (one-time per cluster) | `azure-arc` namespace | +| DocumentDB Operator | Arc Extension Manager | `documentdb-operator` namespace | +| CloudNative-PG Operator | Helm dependency | `documentdb-operator` namespace | + +### Task Breakdown + +#### Task 1.1: Create Extension Manifest (Week 1) + +Create `extension.yaml` that tells Arc how to deploy the operator. + +**Files to create:** +``` +operator/ +└── arc-extension/ + ├── extension.yaml # Arc extension manifest + ├── values-arc.yaml # Arc-specific Helm overrides + └── README.md # Installation guide +``` + +**Deliverable:** Working extension manifest pointing to ghcr.io + +--- + +#### Task 1.2: Set Up Local Test Environment (Week 1-2) + +Set up Kind cluster with Arc agent for local testing. + +**Steps:** +```bash +# Create Kind cluster +kind create cluster --name arc-test + +# Install Arc agent (requires Azure subscription) +az connectedk8s connect --name arc-test --resource-group dev-rg + +# Verify Arc agent running +kubectl get pods -n azure-arc +``` + +**Deliverable:** Reproducible local test environment + +--- + +#### Task 1.3: Test Extension Install Locally (Week 2) + +Test extension deployment before Azure registration. 
+ +**Steps:** +```bash +# Manually deploy extension (simulates Arc behavior) +helm install documentdb-operator \ + oci://ghcr.io/documentdb/documentdb-operator \ + --version 0.1.3 \ + --namespace documentdb-operator \ + --create-namespace + +# Verify deployment +kubectl get pods -n documentdb-operator +kubectl get deployment documentdb-operator -n documentdb-operator +``` + +**Deliverable:** Confirmed Helm chart works via Arc-connected cluster + +--- + +#### Task 1.4: Register Extension Type with Azure (Week 3) + +Register `Microsoft.DocumentDB.Operator` as valid Arc extension type. + +**Steps:** +1. Submit extension registration request to Azure Arc team +2. Provide extension manifest and chart location +3. Configure release trains (preview, stable) +4. Wait for approval (may take several days) + +**Deliverable:** Registered extension type in Azure + +--- + +#### Task 1.5: E2E Testing & Documentation (Week 3-4) + +Full end-to-end testing of customer experience + documentation. + +**Test scenarios:** +```bash +# Install extension via CLI +az k8s-extension create \ + --name documentdb-operator \ + --extension-type Microsoft.DocumentDB.Operator \ + --cluster-name test-cluster \ + --resource-group test-rg \ + --cluster-type connectedClusters + +# Verify in Azure Portal +az k8s-extension show --name documentdb-operator ... + +# Test upgrade +az k8s-extension update --version 0.1.4 ... + +# Test uninstall +az k8s-extension delete --name documentdb-operator ... +``` + +**Deliverables:** +- All test scenarios passing +- Installation guide in README.md +- Troubleshooting guide + +--- + +### Customer Experience + +```bash +# 1. Connect cluster to Azure Arc (one-time per cluster) +az connectedk8s connect --name my-cluster --resource-group my-rg + +# 2. 
Install DocumentDB extension +az k8s-extension create \ + --name documentdb-operator \ + --extension-type Microsoft.DocumentDB.Operator \ + --cluster-name my-cluster \ + --resource-group my-rg \ + --cluster-type connectedClusters + +# 3. Verify in Azure Portal or CLI +az k8s-extension show --name documentdb-operator \ + --cluster-name my-cluster --resource-group my-rg \ + --cluster-type connectedClusters +``` + +### Chart Source: Use Existing ghcr.io Registry + +**No repackaging required.** Arc can pull directly from the existing OCI registry. + +| Source Option | Supported | Recommendation | +|---------------|-----------|----------------| +| OCI Registry (ghcr.io) | ✅ | **Use this** - already have it | +| Azure Container Registry | ✅ | Alternative if needed | +| Public Helm repo | ✅ | Alternative option | + +Existing chart location: +``` +oci://ghcr.io/documentdb/documentdb-operator:0.1.1 +``` + +### Extension Manifest Example + +```yaml +# operator/arc-extension/extension.yaml +extensionType: Microsoft.DocumentDB.Operator +version: 0.1.3 + +helm: + # Point directly to existing ghcr.io chart (no repackaging) + registryUrl: oci://ghcr.io/documentdb + chartName: documentdb-operator + chartVersion: 0.1.3 + + releaseName: documentdb-operator + releaseNamespace: documentdb-operator + +# User-configurable settings via az k8s-extension create +configurationSettings: + - name: documentDbVersion + description: "DocumentDB version" + defaultValue: "0.1.3" + +# Health monitoring +healthChecks: + - kind: Deployment + name: documentdb-operator + namespace: documentdb-operator +``` + +### Registry Authentication + +If ghcr.io requires authentication: + +```bash +# Option 1: Make chart public (simplest) +# Option 2: Configure Arc with registry credentials +az k8s-extension create \ + --name documentdb-operator \ + --extension-type Microsoft.DocumentDB.Operator \ + --configuration-protected-settings "registry.username=xxx" \ + --configuration-protected-settings 
"registry.password=xxx" \ + ... +``` + +### Testing Checklist + +- [ ] Extension installs via `az k8s-extension create` +- [ ] Operators running in cluster +- [ ] Extension visible in Azure Portal +- [ ] Health status reported correctly +- [ ] Upgrade and uninstall work + +--- + +## Phase 2: Metering & Billing + +**Duration:** 3-5 weeks (after Phase 1) +**Goal:** Track usage and generate customer invoices + +### Billing Metrics + +| Meter ID | Description | Unit | +|----------|-------------|------| +| `documentdb-instance-hours` | Running DocumentDB instances | instance-hour | +| `documentdb-storage-gb` | Provisioned storage | GB-month | +| `documentdb-vcpu-hours` | Allocated vCPUs | vCPU-hour | + +### Task Breakdown + +#### Task 2.1: Define Billing Model with Azure Commerce (Week 1-2) + +Work with Azure Commerce team to register meter IDs. + +**Steps:** +1. Define pricing tiers and SKUs +2. Register meter IDs with Azure Commerce +3. Set up billing sandbox for testing +4. Define billing frequency (hourly, daily) + +**Deliverable:** Registered meter IDs, billing sandbox access + +--- + +#### Task 2.2: Implement Usage Collection (Week 2-3) + +Add metering code to operator to collect usage data. 
+ +**Files to create:** +``` +operator/src/internal/ +└── metering/ + ├── reporter.go # Usage collection & reporting + ├── reporter_test.go # Unit tests + ├── metrics.go # Meter definitions + └── types.go # Data structures +``` + +**Sample code:** +```go +// internal/metering/reporter.go +type UsageRecord struct { + Timestamp time.Time + InstanceCount int + TotalStorageGB float64 + TotalVCPUs int + ClusterID string +} + +func (r *Reporter) CollectUsage(ctx context.Context) (*UsageRecord, error) { + var docdbList documentdbv1.DocumentDBList + if err := r.client.List(ctx, &docdbList); err != nil { + return nil, err + } + + record := &UsageRecord{Timestamp: time.Now().UTC()} + for _, db := range docdbList.Items { + record.InstanceCount++ + record.TotalStorageGB += parseStorageGB(db.Spec.Resource.Storage.PvcSize) + record.TotalVCPUs += db.Spec.NodeCount * db.Spec.InstancesPerNode + } + return record, nil +} +``` + +**Deliverable:** Working usage collection with unit tests + +--- + +#### Task 2.3: Integrate Arc Metering SDK (Week 3-4) + +Send collected usage to Azure via Arc metering API. + +**Files to create:** +``` +operator/src/internal/ +└── metering/ + └── arc_client.go # Arc metering API client +``` + +**Sample code:** +```go +// internal/metering/arc_client.go +func (c *ArcClient) SubmitUsage(ctx context.Context, records []MeterRecord) error { + // POST to Arc metering endpoint + // https://management.azure.com/.../extensions/.../usage + return c.httpClient.Post(ctx, c.meteringEndpoint, records) +} +``` + +**Deliverable:** Usage data flowing to Azure + +--- + +#### Task 2.4: Add Metering to Controller (Week 4) + +Integrate metering reporter into main reconciliation loop. + +**Modify:** `operator/src/internal/controller/documentdb_controller.go` + +```go +func (r *DocumentDBReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + // Existing reconciliation logic... 
+ + // Report usage (runs periodically, not on every reconcile) + if r.metering != nil && r.shouldReportUsage() { + if err := r.metering.ReportUsage(ctx); err != nil { + log.Error(err, "failed to report usage") + // Don't fail reconcile for metering errors + } + } + + return ctrl.Result{}, nil +} +``` + +**Deliverable:** Operator reports usage automatically + +--- + +#### Task 2.5: E2E Billing Validation (Week 5) + +Test full billing flow in Azure sandbox. + +**Test scenarios:** +1. Deploy DocumentDB instance → verify usage recorded +2. Scale up instances → verify usage increases +3. Delete instance → verify usage stops +4. Check Azure invoice → verify charges appear + +**Deliverable:** Validated billing accuracy + +--- + +#### Task 2.6: Production Rollout (Week 5) + +Release metering feature to production. + +**Steps:** +1. Feature flag for metering (opt-in initially) +2. Update Helm chart with metering config +3. Update extension manifest +4. Release new version via Arc +5. Monitor billing data flow + +**Deliverable:** GA release with billing + +--- + +## Flux GitOps Setup (Option B) + +Alternative deployment using Flux GitOps. 
**No Azure extension registration required.**

### Duration: 1-2 days

### Prerequisites

- Azure subscription
- Kubernetes cluster (v1.26+)
- Azure CLI with `connectedk8s` extension

### Setup Steps

#### Step 1: Connect Cluster to Azure Arc

```bash
# Login to Azure
az login
az account set --subscription <subscription-id>

# Create resource group
az group create --name my-arc-rg --location eastus

# Connect cluster to Arc
az connectedk8s connect --name my-cluster --resource-group my-arc-rg

# Verify connection
kubectl get pods -n azure-arc
```

#### Step 2: Deploy via Flux GitOps

```bash
# Create Flux configuration for DocumentDB operator
az k8s-configuration flux create \
  --name documentdb-operator \
  --cluster-name my-cluster \
  --resource-group my-arc-rg \
  --cluster-type connectedClusters \
  --namespace documentdb-operator \
  --scope cluster \
  --source-kind HelmRepository \
  --source-url "oci://ghcr.io/documentdb" \
  --helm-chart documentdb-operator \
  --helm-chart-version 0.1.3 \
  --helm-release-name documentdb-operator \
  --helm-release-namespace documentdb-operator

# Verify deployment
kubectl get pods -n documentdb-operator
kubectl get pods -n cnpg-system
```

#### Step 3: Verify in Azure Portal

1. Navigate to **Azure Arc** > **Kubernetes clusters**
2. Select your cluster
3. Go to **GitOps** > **Configurations**
4.
Verify `documentdb-operator` configuration status + +### Upgrade via Flux + +```bash +az k8s-configuration flux update \ + --name documentdb-operator \ + --cluster-name my-cluster \ + --resource-group my-arc-rg \ + --cluster-type connectedClusters \ + --helm-chart-version 0.1.4 +``` + +### Uninstall + +```bash +az k8s-configuration flux delete \ + --name documentdb-operator \ + --cluster-name my-cluster \ + --resource-group my-arc-rg \ + --cluster-type connectedClusters \ + --yes +``` + +### Billing with Flux (Custom Metering) + +Since Flux doesn't support built-in Arc metering, implement custom metering: + +1. **Option 1:** Push metrics to Azure Monitor + - Operator collects usage → sends to Azure Monitor workspace + - Build billing reports from Azure Monitor data + +2. **Option 2:** Push metrics to custom backend + - Operator sends usage to your billing service + - Full control over billing logic + +```go +// Example: Push to Azure Monitor instead of Arc metering +func (r *Reporter) ReportToAzureMonitor(ctx context.Context, record *UsageRecord) error { + // POST to Azure Monitor ingestion endpoint + return r.azureMonitorClient.IngestMetrics(ctx, []Metric{ + {Name: "documentdb_instance_count", Value: float64(record.InstanceCount)}, + {Name: "documentdb_storage_gb", Value: record.TotalStorageGB}, + }) +} +``` + +--- + +## Prerequisites + +**For AKS clusters:** +- Azure subscription +- AKS cluster (v1.26+) +- Azure CLI with `aks` extension + +**For non-AKS clusters (EKS, GKE, on-prem):** +- Azure subscription +- Kubernetes cluster (v1.26+) +- Azure CLI with `connectedk8s` extension +- Arc agent installed on cluster (`az connectedk8s connect`) + +**Additional for Option A (Azure Extension):** +- Extension type registration (requires Microsoft approval) +- Azure Commerce onboarding (Phase 2 billing) + +**Additional for Option B (Flux GitOps):** +- None - can start immediately + +### Querying All DocumentDB Installations + +```bash +# Query all DocumentDB extensions 
across ALL clusters (AKS + Arc) +az graph query -q " + resources + | where type == 'microsoft.kubernetesconfiguration/extensions' + | where properties.extensionType == 'Microsoft.DocumentDB.Operator' + | extend clusterType = case( + id contains 'managedClusters', 'AKS', + id contains 'connectedClusters', 'Arc', + 'Unknown') + | project subscriptionId, resourceGroup, clusterName=split(id,'/')[8], clusterType, version=properties.version +" +``` + +--- + +## References + +- [AKS Cluster Extensions](https://learn.microsoft.com/en-us/azure/aks/cluster-extensions) +- [Azure Arc-enabled Kubernetes](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/) +- [Create Arc Extensions](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/extensions) +- [Arc Metering](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/conceptual-usage-metering) +- [Flux GitOps with Azure Arc](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/tutorial-use-gitops-flux2) +- [Flux Documentation](https://fluxcd.io/flux/) (CNCF project) +- [Azure Kubernetes Fleet Manager](https://learn.microsoft.com/en-us/azure/kubernetes-fleet/) +- [Azure Resource Graph](https://learn.microsoft.com/en-us/azure/governance/resource-graph/) + +--- + +## Appendix A: Azure Fleet Manager and Arc + +### Overview + +Azure Kubernetes Fleet Manager is a multi-cluster orchestration service that can manage both AKS and non-Azure Kubernetes clusters. This section clarifies how Fleet relates to Arc and when each should be used. 
+ +### How Fleet Uses Arc + +``` +┌────────────────────────────────────────────────────────────────────┐ +│ Azure Control Plane │ +├────────────────────────────────────────────────────────────────────┤ +│ │ +│ Fleet Hub ◄──────── needs a way to talk to clusters ────────► │ +│ │ +└────────────────────────────────────────────────────────────────────┘ + │ │ + ▼ ▼ + ┌──────────────────┐ ┌──────────────────┐ + │ AKS │ │ EKS / GKE / │ + │ │ │ On-prem │ + │ Already in Azure│ │ │ + │ (native API) │ │ NOT in Azure │ + │ │ │ (no API access) │ + └──────────────────┘ └──────────────────┘ + │ │ + Fleet talks via Fleet needs Arc + Azure Resource Manager as a "bridge" + │ │ + ▼ ▼ + No Arc needed Arc agent required +``` + +**Key insight:** Arc is Azure's universal "reach into any K8s cluster" mechanism. Fleet reuses Arc's secure tunnel rather than implementing its own connectivity solution. + +### Arc Agent Capabilities + +``` +Arc Agent on cluster: +├── Secure tunnel to Azure (outbound HTTPS only) +├── Identity (managed identity for the cluster) +├── Extension framework (install add-ons) +└── Configuration sync (GitOps, policies) + +Fleet uses: +└── Just the secure tunnel (to push Work objects) +``` + +### When Arc Agent is Required + +| Cluster Type | Arc Agent Needed? 
| Reason |
|--------------|-------------------|--------|
| **AKS** (Azure) | ❌ No | AKS is Azure-native, direct API access |
| **EKS** (AWS) | ✅ Yes | Arc provides Azure connectivity |
| **GKE** (GCP) | ✅ Yes | Arc provides Azure connectivity |
| **On-premises** | ✅ Yes | Arc provides Azure connectivity |

### Fleet vs Arc Extension: Different Use Cases

| Aspect | Arc Extension | Fleet Manager |
|--------|---------------|---------------|
| **Primary purpose** | Per-cluster app lifecycle | Multi-cluster orchestration |
| **Installation trigger** | `az k8s-extension create` | ClusterResourcePlacement (CRP) |
| **Tracking mechanism** | ARM extension inventory | CRP status in Fleet hub |
| **Best for** | Individual cluster billing | Enterprise multi-cluster management |
| **Per-app Azure registration** | ✅ Required | ❌ Not required |
| **Built-in metering** | ✅ Yes | ❌ Custom required |

### Fleet's Work Object

When Fleet deploys workloads, it creates "Work" objects on member clusters:

```yaml
apiVersion: placement.kubernetes-fleet.io/v1
kind: Work
metadata:
  name: documentdb-operator-work
  namespace: fleet-member-<cluster-name>
spec:
  workload:
    manifests:
    - # Helm release or raw manifests pushed by Fleet
```

### Querying Installations via Fleet

```bash
# List all Fleet member clusters
az fleet member list --fleet-name my-fleet -o table

# Check DocumentDB deployment status across all clusters
kubectl get clusterresourceplacement documentdb-operator \
  -o jsonpath='{.status.placementStatuses[*].clusterName}'
```

### Recommendation Matrix

| Customer Profile | Recommended Approach |
|------------------|---------------------|
| Single cluster | Arc Extension (Option A or B) |
| Multiple AKS clusters | Fleet Manager (no Arc agents needed) |
| Multi-cloud (AKS + EKS/GKE) | Fleet + Arc agents on non-AKS clusters |
| Needs Azure Marketplace billing | Arc Extension (Option A) |
| Enterprise with existing Fleet | Fleet CRPs for
deployment, custom metering for billing | + +### Existing Fleet Implementation + +This repository includes Fleet deployment examples in `documentdb-playground/aks-fleet-deployment/`: + +- `documentdb-operator-crp.yaml` - ClusterResourcePlacement for operator +- `cert-manager-crp.yaml` - ClusterResourcePlacement for cert-manager +- `deploy-multi-region.sh` - Multi-region deployment script + +These examples demonstrate AKS-only Fleet deployment (no Arc agents required). diff --git a/operator/arc-extension/README.md b/operator/arc-extension/README.md new file mode 100644 index 00000000..370beb7b --- /dev/null +++ b/operator/arc-extension/README.md @@ -0,0 +1,340 @@ +# DocumentDB Kubernetes Operator - Azure Extension + +Deploy DocumentDB Kubernetes Operator on any Kubernetes cluster using Azure extensions. + +## Overview + +This extension allows you to: +- Install DocumentDB Operator on **AKS** clusters (Azure-native) +- Install DocumentDB Operator on **any Kubernetes cluster** via Azure Arc (on-premises, edge, multi-cloud) +- View and manage the extension in Azure Portal +- Monitor extension health and status from Azure +- Unified billing across all cluster types (Phase 2) + +### Supported Cluster Types + +| Cluster Type | `--cluster-type` | Arc Agent Required? 
|
|--------------|------------------|--------------------|
| AKS (Azure) | `managedClusters` | No |
| EKS (AWS) | `connectedClusters` | Yes |
| GKE (GCP) | `connectedClusters` | Yes |
| On-premises | `connectedClusters` | Yes |

## Prerequisites

### For AKS Clusters

- Azure subscription
- AKS cluster (v1.26+)
- Azure CLI with `aks` and `k8s-extension` extensions

```bash
az extension add --name aks-preview
az extension add --name k8s-extension
```

### For Non-AKS Clusters (Arc-enabled)

- Azure subscription
- Kubernetes cluster (v1.26+)
- Azure CLI with `connectedk8s` and `k8s-extension` extensions
- `kubectl` configured to access your cluster

```bash
az extension add --name connectedk8s
az extension add --name k8s-extension
```

## Installation

### Option 1: AKS Clusters (No Arc Agent Needed)

```bash
# Login to Azure
az login
az account set --subscription <subscription-id>

# Install extension directly on AKS
az k8s-extension create \
  --name documentdb-operator \
  --extension-type Microsoft.DocumentDB.Operator \
  --cluster-name my-aks-cluster \
  --resource-group my-rg \
  --cluster-type managedClusters \
  --release-train stable
```

Verify installation:
```bash
# Check extension status
az k8s-extension show \
  --name documentdb-operator \
  --cluster-name my-aks-cluster \
  --resource-group my-rg \
  --cluster-type managedClusters

# Check pods
kubectl get pods -n documentdb-operator
kubectl get pods -n cnpg-system
```

### Option 2: Arc-enabled Clusters (EKS, GKE, On-premises)

#### Step 1: Connect Your Cluster to Azure Arc (One-time)

```bash
# Login to Azure
az login
az account set --subscription <subscription-id>

# Create resource group (if needed)
az group create --name my-arc-rg --location eastus

# Connect cluster to Azure Arc
az connectedk8s connect \
  --name my-cluster \
  --resource-group my-arc-rg
```

Verify Arc agent is running:
```bash
kubectl get pods -n azure-arc
```

#### Step 2: Install
DocumentDB Extension + +```bash +az k8s-extension create \ + --name documentdb-operator \ + --extension-type Microsoft.DocumentDB.Operator \ + --cluster-name my-cluster \ + --resource-group my-arc-rg \ + --cluster-type connectedClusters \ + --release-train stable +``` + +#### Step 3: Verify Installation + +```bash +# Check extension status +az k8s-extension show \ + --name documentdb-operator \ + --cluster-name my-cluster \ + --resource-group my-arc-rg \ + --cluster-type connectedClusters + +# Check pods in cluster +kubectl get pods -n documentdb-operator +kubectl get pods -n cnpg-system +``` + +## Configuration Options + +> **Note:** For all examples below, use `--cluster-type managedClusters` for AKS or `--cluster-type connectedClusters` for Arc-enabled clusters. + +### Basic Configuration + +```bash +az k8s-extension create \ + --name documentdb-operator \ + --extension-type Microsoft.DocumentDB.Operator \ + --cluster-name my-cluster \ + --resource-group my-rg \ + --cluster-type managedClusters \ # or connectedClusters + --configuration-settings documentDbVersion=0.1.3 \ + --configuration-settings replicaCount=1 +``` + +### Enable WAL Replica Feature + +```bash +az k8s-extension create \ + --name documentdb-operator \ + --extension-type Microsoft.DocumentDB.Operator \ + --cluster-name my-cluster \ + --resource-group my-rg \ + --cluster-type managedClusters \ # or connectedClusters + --configuration-settings walReplica=true +``` + +### Private Registry Authentication + +If using a private container registry: + +```bash +az k8s-extension create \ + --name documentdb-operator \ + --extension-type Microsoft.DocumentDB.Operator \ + --cluster-name my-cluster \ + --resource-group my-rg \ + --cluster-type managedClusters \ # or connectedClusters + --configuration-protected-settings registry.username= \ + --configuration-protected-settings registry.password= +``` + +## Managing the Extension + +### Check Extension Status + +```bash +# For AKS +az k8s-extension show \ + 
--name documentdb-operator \ + --cluster-name my-aks-cluster \ + --resource-group my-rg \ + --cluster-type managedClusters \ + --output table + +# For Arc-enabled clusters +az k8s-extension show \ + --name documentdb-operator \ + --cluster-name my-arc-cluster \ + --resource-group my-rg \ + --cluster-type connectedClusters \ + --output table +``` + +### Upgrade Extension + +```bash +az k8s-extension update \ + --name documentdb-operator \ + --cluster-name my-cluster \ + --resource-group my-rg \ + --cluster-type managedClusters \ # or connectedClusters + --version 0.1.4 +``` + +### Uninstall Extension + +```bash +az k8s-extension delete \ + --name documentdb-operator \ + --cluster-name my-cluster \ + --resource-group my-rg \ + --cluster-type managedClusters \ # or connectedClusters + --yes +``` + +### Query All Installations (Cross-Cluster) + +Use Azure Resource Graph to find all DocumentDB installations across your subscriptions: + +```bash +az graph query -q " + resources + | where type == 'microsoft.kubernetesconfiguration/extensions' + | where properties.extensionType == 'Microsoft.DocumentDB.Operator' + | extend clusterType = case( + id contains 'managedClusters', 'AKS', + id contains 'connectedClusters', 'Arc', + 'Unknown') + | project subscriptionId, resourceGroup, + clusterName=split(id,'/')[8], clusterType, + version=properties.version +" +``` + +## Deploying DocumentDB Instances + +After the operator is installed, deploy DocumentDB instances: + +```yaml +# documentdb-instance.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: documentdb-ns +--- +apiVersion: v1 +kind: Secret +metadata: + name: documentdb-credentials + namespace: documentdb-ns +type: Opaque +stringData: + username: docdbadmin + password: YourSecurePassword123! 
+--- +apiVersion: documentdb.io/preview +kind: DocumentDB +metadata: + name: my-documentdb + namespace: documentdb-ns +spec: + nodeCount: 1 + instancesPerNode: 1 + documentDBImage: ghcr.io/microsoft/documentdb/documentdb-local:16 + gatewayImage: ghcr.io/microsoft/documentdb/documentdb-local:16 + documentDbCredentialSecret: documentdb-credentials + resource: + storage: + pvcSize: 10Gi +``` + +Apply the configuration: +```bash +kubectl apply -f documentdb-instance.yaml +``` + +## Azure Portal + +Once installed, you can view and manage the extension in Azure Portal: + +### For AKS Clusters +1. Navigate to **Kubernetes services** +2. Select your AKS cluster +3. Go to **Settings** > **Extensions** +4. Find **documentdb-operator** + +### For Arc-enabled Clusters +1. Navigate to **Azure Arc** > **Kubernetes clusters** +2. Select your cluster +3. Go to **Extensions** +4. Find **documentdb-operator** + +## Troubleshooting + +### Extension Installation Fails + +```bash +# Check extension status (use appropriate --cluster-type) +az k8s-extension show --name documentdb-operator \ + --cluster-name my-cluster --resource-group my-rg \ + --cluster-type managedClusters # or connectedClusters + +# For Arc-enabled clusters: Check Arc agent logs +kubectl logs -n azure-arc -l app.kubernetes.io/name=clusterconnect-agent + +# Check operator logs +kubectl logs -n documentdb-operator -l app.kubernetes.io/name=documentdb-operator +``` + +### Pods Not Starting + +```bash +# Check pod status +kubectl get pods -n documentdb-operator -o wide + +# Describe pod for events +kubectl describe pod -n documentdb-operator + +# Check CNPG operator +kubectl get pods -n cnpg-system +``` + +### Connectivity Issues + +Ensure outbound connectivity to: +- `ghcr.io` (port 443) - Container images + +**Additional for Arc-enabled clusters:** +- `*.servicebus.windows.net` (port 443) +- `*.guestconfiguration.azure.com` (port 443) + +## Support + +- [DocumentDB Operator 
Documentation](https://documentdb.io/documentdb-kubernetes-operator/preview/) +- [AKS Cluster Extensions](https://learn.microsoft.com/en-us/azure/aks/cluster-extensions) +- [Azure Arc Documentation](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/) +- [GitHub Issues](https://github.com/microsoft/documentdb-kubernetes-operator/issues) diff --git a/operator/arc-extension/extension.yaml b/operator/arc-extension/extension.yaml new file mode 100644 index 00000000..6a266a7a --- /dev/null +++ b/operator/arc-extension/extension.yaml @@ -0,0 +1,139 @@ +# Azure Extension Manifest for DocumentDB Kubernetes Operator +# +# This manifest tells Azure how to deploy the DocumentDB operator +# on Kubernetes clusters (AKS and Arc-enabled). +# +# Extension Type: Microsoft.DocumentDB.Operator +# Chart Source: OCI registry at ghcr.io/documentdb/documentdb-operator +# +# Supported cluster types: +# - managedClusters (AKS) - no Arc agent required +# - connectedClusters (Arc-enabled) - Arc agent required + +apiVersion: arc.azure.com/v1 +kind: ExtensionConfiguration +metadata: + name: documentdb-operator + namespace: documentdb-operator + +spec: + # Extension identity registered with Azure + extensionType: Microsoft.DocumentDB.Operator + + # Extension version (should match Helm chart version) + version: 0.1.3 + + # Release train configuration + releaseTrain: stable + autoUpgradeMinorVersion: true + + # Helm chart configuration + helm: + # OCI registry location (existing ghcr.io - no repackaging needed) + registryUrl: oci://ghcr.io/documentdb + chartName: documentdb-operator + chartVersion: "0.1.3" + + # Deployment configuration + releaseName: documentdb-operator + releaseNamespace: documentdb-operator + createNamespace: true + + # Default timeout for Helm operations + timeout: 10m + + # Values file for Arc-specific overrides + valuesFile: values-arc.yaml + + # User-configurable settings exposed via az k8s-extension create + # Example: az k8s-extension create ... 
--configuration-settings documentDbVersion=0.1.3 + configurationSettings: + - name: documentDbVersion + description: "DocumentDB operator version" + type: string + defaultValue: "0.1.3" + required: false + + - name: replicaCount + description: "Number of operator replicas" + type: integer + defaultValue: "1" + required: false + + - name: walReplica + description: "Enable WAL replica feature" + type: boolean + defaultValue: "false" + required: false + + # Protected settings (secrets) - not logged or displayed + configurationProtectedSettings: + - name: registry.username + description: "Container registry username (if private)" + type: string + required: false + + - name: registry.password + description: "Container registry password (if private)" + type: string + required: false + + # Health monitoring configuration + healthChecks: + # Primary operator deployment + - kind: Deployment + name: documentdb-operator + namespace: documentdb-operator + healthyThreshold: 1 + unhealthyThreshold: 3 + + # CNPG operator (dependency) + - kind: Deployment + name: cloudnative-pg + namespace: cnpg-system + healthyThreshold: 1 + unhealthyThreshold: 3 + + # Scope: cluster-wide (not namespace-scoped) + scope: + cluster: + releaseNamespace: documentdb-operator + + # Identity configuration for Azure resources access + identity: + type: SystemAssigned + + # Resource requirements for extension pods + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 500m + memory: 512Mi + +--- +# Release train definitions +apiVersion: arc.azure.com/v1 +kind: ReleaseTrainConfiguration +metadata: + name: documentdb-operator-release-trains + +spec: + releaseTrains: + # Stable release train - production ready + - name: stable + enabled: true + versions: + - "0.1.3" + - "0.1.2" + - "0.1.1" + defaultVersion: "0.1.3" + + # Preview release train - early access features + - name: preview + enabled: true + versions: + - "0.2.0-preview" + - "0.1.3" + defaultVersion: "0.1.3" diff --git 
a/operator/arc-extension/test-arc-extension.sh b/operator/arc-extension/test-arc-extension.sh new file mode 100755 index 00000000..cc8456e5 --- /dev/null +++ b/operator/arc-extension/test-arc-extension.sh @@ -0,0 +1,325 @@ +#!/bin/bash + +# Azure Extension Test Script for DocumentDB Operator +# +# This script helps test the extension locally before Azure registration. +# It uses Kind clusters to simulate Arc-enabled clusters (connectedClusters). +# +# For AKS (managedClusters) testing, use an actual AKS cluster and run: +# az k8s-extension create --cluster-type managedClusters ... +# +# This script can: +# 1. Create a Kind cluster for testing +# 2. Connect the cluster to Azure Arc +# 3. Simulate extension deployment (before registration) +# 4. Test full Arc extension flow (after registration) +# +# Usage: +# ./test-arc-extension.sh --setup-kind # Create Kind cluster only +# ./test-arc-extension.sh --connect-arc # Connect to Azure Arc +# ./test-arc-extension.sh --simulate-install # Simulate extension install (no Arc registration) +# ./test-arc-extension.sh --install-extension # Install via az k8s-extension (requires registration) +# ./test-arc-extension.sh --cleanup # Delete Kind cluster + +set -e + +# Configuration +CLUSTER_NAME="${ARC_CLUSTER_NAME:-arc-test-cluster}" +RESOURCE_GROUP="${ARC_RESOURCE_GROUP:-arc-test-rg}" +LOCATION="${ARC_LOCATION:-eastus}" +CHART_VERSION="${CHART_VERSION:-0.1.3}" +GITHUB_ORG="${GITHUB_ORG:-documentdb}" + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log() { echo -e "${BLUE}[$(date +'%H:%M:%S')]${NC} $1"; } +success() { echo -e "${GREEN}[$(date +'%H:%M:%S')] ✅ $1${NC}"; } +warn() { echo -e "${YELLOW}[$(date +'%H:%M:%S')] ⚠️ $1${NC}"; } +error() { echo -e "${RED}[$(date +'%H:%M:%S')] ❌ $1${NC}"; exit 1; } + +# Check prerequisites +check_prerequisites() { + log "Checking prerequisites..." 
+
+    command -v kubectl &>/dev/null || error "kubectl not found"
+    command -v helm &>/dev/null || error "helm not found"
+    command -v az &>/dev/null || error "Azure CLI not found"
+
+    success "Prerequisites met"
+}
+
+# Create Kind cluster
+setup_kind() {
+    log "Setting up Kind cluster: $CLUSTER_NAME"
+
+    command -v kind &>/dev/null || error "kind not found. Install: https://kind.sigs.k8s.io/"
+
+    if kind get clusters | grep -q "^${CLUSTER_NAME}$"; then
+        warn "Cluster $CLUSTER_NAME already exists"
+    else
+        # NOTE(review): heredoc was garbled in source; reconstructed minimal Kind config — confirm against original
+        cat <<EOF | kind create cluster --name "$CLUSTER_NAME" --config=-
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+  - role: control-plane
+EOF
+        success "Kind cluster created"
+    fi
+}
+
+# Connect cluster to Azure Arc
+connect_arc() {
+    log "Connecting cluster to Azure Arc..."
+
+    # Verify Azure login
+    az account show &>/dev/null || error "Not logged into Azure. Run: az login"
+
+    # Install/update extensions
+    log "Installing Azure CLI extensions..."
+    az extension add --name connectedk8s --upgrade --yes 2>/dev/null || true
+    az extension add --name k8s-extension --upgrade --yes 2>/dev/null || true
+
+    # Create resource group
+    log "Creating resource group: $RESOURCE_GROUP"
+    az group create --name "$RESOURCE_GROUP" --location "$LOCATION" --output none 2>/dev/null || true
+
+    # Connect to Arc
+    log "Connecting to Azure Arc (this may take a few minutes)..."
+    az connectedk8s connect \
+        --name "$CLUSTER_NAME" \
+        --resource-group "$RESOURCE_GROUP" \
+        --location "$LOCATION"
+
+    # Verify
+    log "Verifying Arc agent..."
+    kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=clusterconnect-agent -n azure-arc --timeout=300s
+
+    success "Cluster connected to Azure Arc"
+    log "View in portal: https://portal.azure.com/#view/Microsoft_Azure_HybridCompute/AzureArcCenterBlade/~/overview"
+}
+
+# Simulate extension install (before Azure registration)
+simulate_install() {
+    log "Simulating Arc extension install (direct Helm)..."
+    log "This bypasses Arc and installs directly - use for local testing"
+
+    # Check for GitHub credentials
+    if [ -z "$GITHUB_TOKEN" ] || [ -z "$GITHUB_USERNAME" ]; then
+        warn "GITHUB_TOKEN and GITHUB_USERNAME not set"
+        warn "Set them if ghcr.io requires authentication"
+    else
+        log "Authenticating with ghcr.io..." 
+ echo "$GITHUB_TOKEN" | helm registry login ghcr.io --username "$GITHUB_USERNAME" --password-stdin + fi + + # Install using Helm (simulates what Arc does) + log "Installing DocumentDB operator via Helm..." + helm upgrade --install documentdb-operator \ + oci://ghcr.io/${GITHUB_ORG}/documentdb-operator \ + --version "$CHART_VERSION" \ + --namespace documentdb-operator \ + --create-namespace \ + --values "$(dirname "$0")/values-arc.yaml" \ + --wait \ + --timeout 10m + + # Verify + log "Verifying installation..." + kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=documentdb-operator -n documentdb-operator --timeout=300s + kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=cloudnative-pg -n cnpg-system --timeout=300s + + success "Simulated extension install complete" + + echo "" + log "Installed components:" + kubectl get pods -n documentdb-operator + kubectl get pods -n cnpg-system +} + +# Install via Arc extension (requires registration) +install_extension() { + log "Installing DocumentDB operator via Azure Arc extension..." + + # Check Arc connection + az connectedk8s show --name "$CLUSTER_NAME" --resource-group "$RESOURCE_GROUP" &>/dev/null \ + || error "Cluster not connected to Arc. Run: $0 --connect-arc" + + # Install extension + az k8s-extension create \ + --name documentdb-operator \ + --extension-type Microsoft.DocumentDB.Operator \ + --cluster-name "$CLUSTER_NAME" \ + --resource-group "$RESOURCE_GROUP" \ + --cluster-type connectedClusters \ + --release-train stable \ + --configuration-settings documentDbVersion="$CHART_VERSION" + + # Verify + log "Verifying extension..." 
+ az k8s-extension show \ + --name documentdb-operator \ + --cluster-name "$CLUSTER_NAME" \ + --resource-group "$RESOURCE_GROUP" \ + --cluster-type connectedClusters \ + --output table + + success "Extension installed via Azure Arc" +} + +# Show extension status +show_status() { + log "Extension status:" + + echo "" + echo "=== Azure Arc Extension ===" + az k8s-extension show \ + --name documentdb-operator \ + --cluster-name "$CLUSTER_NAME" \ + --resource-group "$RESOURCE_GROUP" \ + --cluster-type connectedClusters \ + --output table 2>/dev/null || warn "Extension not found in Azure" + + echo "" + echo "=== Kubernetes Pods ===" + echo "DocumentDB Operator:" + kubectl get pods -n documentdb-operator 2>/dev/null || warn "Namespace not found" + echo "" + echo "CNPG Operator:" + kubectl get pods -n cnpg-system 2>/dev/null || warn "Namespace not found" + echo "" + echo "Azure Arc Agent:" + kubectl get pods -n azure-arc 2>/dev/null || warn "Arc agent not installed" +} + +# Uninstall extension +uninstall_extension() { + log "Uninstalling extension..." + + # Try Arc uninstall first + az k8s-extension delete \ + --name documentdb-operator \ + --cluster-name "$CLUSTER_NAME" \ + --resource-group "$RESOURCE_GROUP" \ + --cluster-type connectedClusters \ + --yes 2>/dev/null || warn "Arc extension not found" + + # Helm uninstall (if simulated install) + helm uninstall documentdb-operator -n documentdb-operator 2>/dev/null || true + + # Cleanup namespaces + kubectl delete namespace documentdb-operator --ignore-not-found + kubectl delete namespace cnpg-system --ignore-not-found + + success "Extension uninstalled" +} + +# Cleanup everything +cleanup() { + log "Cleaning up..." 
+ + # Disconnect from Arc + az connectedk8s delete \ + --name "$CLUSTER_NAME" \ + --resource-group "$RESOURCE_GROUP" \ + --yes 2>/dev/null || warn "Cluster not connected to Arc" + + # Delete Kind cluster + kind delete cluster --name "$CLUSTER_NAME" 2>/dev/null || warn "Kind cluster not found" + + # Optionally delete resource group + read -p "Delete resource group $RESOURCE_GROUP? (y/N) " -n 1 -r + echo + if [[ $REPLY =~ ^[Yy]$ ]]; then + az group delete --name "$RESOURCE_GROUP" --yes --no-wait + log "Resource group deletion initiated" + fi + + success "Cleanup complete" +} + +# Print usage +usage() { + cat < Date: Tue, 24 Feb 2026 11:57:00 -0800 Subject: [PATCH 2/4] Add more references Signed-off-by: Rayhan Hossain --- .../azure-arc/azure-arc-integration-plan.md | 93 ++++++++++++------- 1 file changed, 62 insertions(+), 31 deletions(-) diff --git a/docs/designs/azure-arc/azure-arc-integration-plan.md b/docs/designs/azure-arc/azure-arc-integration-plan.md index 1cce5189..d725c5d8 100644 --- a/docs/designs/azure-arc/azure-arc-integration-plan.md +++ b/docs/designs/azure-arc/azure-arc-integration-plan.md @@ -2,11 +2,13 @@ ## Overview -This document outlines a two-phase plan to integrate DocumentDB Kubernetes Operator with Azure extensions for deployment tracking and billing across **all** Kubernetes environments. +This document outlines a two-phase plan to integrate DocumentDB Kubernetes Operator with Azure Kubernetes extensions for deployment tracking and billing across **all** Kubernetes environments. **Goal:** Enable customers to install DocumentDB on any Kubernetes cluster (AKS, EKS, GKE, on-premises) while providing visibility in Azure Portal and usage-based billing. -**Key Insight:** Azure knows a cluster exists, but NOT what's installed on it. Extensions solve this tracking gap. +**Key Insight:** Azure knows a cluster exists, but NOT what's installed on it. Kubernetes extensions solve this tracking gap. 
+ +> **Note:** "Kubernetes extensions" (via `az k8s-extension`) are specific to Kubernetes clusters. Azure has separate extension mechanisms for VMs, Arc servers, etc. ### Cluster Type Support @@ -17,7 +19,7 @@ This document outlines a two-phase plan to integrate DocumentDB Kubernetes Opera | **GKE** (GCP) | `connectedClusters` | ✅ Yes | | **On-premises** | `connectedClusters` | ✅ Yes | -**Same extension type (`Microsoft.DocumentDB.Operator`) works for all cluster types.** +**Same Kubernetes extension type (`Microsoft.DocumentDB.Operator`) works for all cluster types.** --- @@ -51,11 +53,11 @@ This document outlines a two-phase plan to integrate DocumentDB Kubernetes Opera ## Deployment Options -Two approaches are available for deploying DocumentDB via Azure Arc: +Two approaches are available for deploying DocumentDB via Azure: -### Option A: Azure Extension (Full Registration) +### Option A: Kubernetes Extension (Full Registration) -Deploy as an official Azure extension type. **Works for both AKS and Arc-enabled clusters.** +Deploy as an official Azure Kubernetes extension type via `az k8s-extension`. 
**Works for both AKS and Arc-enabled clusters.** ```bash # For AKS clusters (no Arc agent needed) @@ -113,8 +115,8 @@ az k8s-configuration flux create \ ### Comparison -| Feature | Option A: Azure Extension | Option B: Flux GitOps | -|---------|--------------------------|----------------------| +| Feature | Option A: K8s Extension | Option B: Flux GitOps | +|---------|------------------------|----------------------| | **AKS support** | ✅ Yes (managedClusters) | ✅ Yes (managedClusters) | | **Arc cluster support** | ✅ Yes (connectedClusters) | ✅ Yes (connectedClusters) | | **Azure registration required** | ✅ Yes (approval process) | ❌ No | @@ -130,14 +132,14 @@ az k8s-configuration flux create \ ### Pros & Cons -#### Option A: Arc Extension +#### Option A: Kubernetes Extension **Pros:** - Official Azure Marketplace presence - Built-in health monitoring and status reporting -- Native Arc metering for billing (Phase 2) +- Native Azure metering for billing (Phase 2) - Enterprise support from Microsoft -- Consistent experience with other Arc extensions +- Consistent experience with other K8s extensions (Defender, Policy, etc.) 
**Cons:** - Requires Azure extension type registration (approval process) @@ -164,27 +166,27 @@ az k8s-configuration flux create \ | Scenario | Recommended Option | |----------|-------------------| | Need to deploy immediately | **Option B: Flux GitOps** | -| Want Azure Marketplace presence | **Option A: Arc Extension** | -| Require built-in Arc billing | **Option A: Arc Extension** | +| Want Azure Marketplace presence | **Option A: K8s Extension** | +| Require built-in Azure billing | **Option A: K8s Extension** | | Already using GitOps workflow | **Option B: Flux GitOps** | -| Enterprise customers expecting official extension | **Option A: Arc Extension** | +| Enterprise customers expecting official extension | **Option A: K8s Extension** | --- -## Phase 1: Arc Extension + ARM Visibility +## Phase 1: K8s Extension + ARM Visibility **Duration:** 3-4 weeks -**Goal:** Install DocumentDB via Arc, view in Azure Portal +**Goal:** Install DocumentDB via `az k8s-extension`, view in Azure Portal -> **Note:** This phase covers Option A (Arc Extension). For Option B (Flux GitOps), skip to the [Flux GitOps Setup](#flux-gitops-setup-option-b) section. +> **Note:** This phase covers Option A (K8s Extension). For Option B (Flux GitOps), skip to the [Flux GitOps Setup](#flux-gitops-setup-option-b) section. 
 ### What Gets Deployed
 
 | Component | Deployed By | Location |
 |-----------|-------------|----------|
-| Azure Arc Agent | Customer (one-time per cluster) | `azure-arc` namespace |
-| DocumentDB Operator | Arc Extension Manager | `documentdb-operator` namespace |
-| CloudNative-PG Operator | Helm dependency | `documentdb-operator` namespace |
+| Azure Arc Agent | Customer (one-time, non-AKS only) | `azure-arc` namespace |
+| DocumentDB Operator | K8s Extension Manager | `documentdb-operator` namespace |
+| CloudNative-PG Operator | Helm dependency | `cnpg-system` namespace |
 
 ### Task Breakdown
 
@@ -196,7 +198,7 @@ Create `extension.yaml` that tells Arc how to deploy the operator.
 ```
 operator/
 └── arc-extension/
-    ├── extension.yaml       # Arc extension manifest
+    ├── extension.yaml       # K8s extension manifest
     ├── values-arc.yaml      # Arc-specific Helm overrides
     └── README.md            # Installation guide
 ```
@@ -249,13 +251,23 @@ kubectl get deployment documentdb-operator -n documentdb-operator
 
 #### Task 1.4: Register Extension Type with Azure (Week 3)
 
-Register `Microsoft.DocumentDB.Operator` as valid Arc extension type.
+Register `Microsoft.DocumentDB.Operator` as valid K8s extension type.
 
 **Steps:**
-1. Submit extension registration request to Azure Arc team
-2. Provide extension manifest and chart location
-3. Configure release trains (preview, stable)
-4. Wait for approval (may take several days)
+1. **Contact Azure Arc team** via one of:
+   - Internal: [Extension Registration (eng.ms)](https://eng.ms/docs/cloud-ai-platform/azure-edge-platform-aep/aep-arc-for-kubernetes/arc-for-k8s-developer-docs/extension-registration)
+   - External: [Arc K8s Extensions Feedback](https://aka.ms/arc-k8s-extensions-feedback)
+2. **Provide extension manifest** including:
+   - Extension type name: `Microsoft.DocumentDB.Operator`
+   - Helm chart OCI URL: `oci://ghcr.io/documentdb/documentdb-operator`
+   - Chart version(s): `0.1.3`
+   - Configuration settings schema
+   - Health check definitions
+3. 
**Configure release trains** (preview, stable) +4. **Test in staging environment** (Azure provides test tenant) +5. **Promote to production** after validation + +> **Note:** There's no self-service portal for extension registration. This requires manual coordination with the Azure Arc team. **Deliverable:** Registered extension type in Azure @@ -656,7 +668,7 @@ func (r *Reporter) ReportToAzureMonitor(ctx context.Context, record *UsageRecord - Azure CLI with `connectedk8s` extension - Arc agent installed on cluster (`az connectedk8s connect`) -**Additional for Option A (Azure Extension):** +**Additional for Option A (K8s Extension):** - Extension type registration (requires Microsoft approval) - Azure Commerce onboarding (Phase 2 billing) @@ -683,6 +695,15 @@ az graph query -q " ## References +### Extension Development & Registration + +- [Cluster Extensions Conceptual Overview](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/conceptual-extensions) - How extensions work +- [Extensions Release & Publishing](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/extensions-release) - Partner extension guide +- [Extension Type Registration (Internal)](https://eng.ms/docs/cloud-ai-platform/azure-edge-platform-aep/aep-arc-for-kubernetes/arc-for-k8s-developer-docs/extension-registration) - Microsoft internal docs (requires corp access) +- [Arc K8s Extensions Feedback](https://aka.ms/arc-k8s-extensions-feedback) - Request new extension registration + +### General Documentation + - [AKS Cluster Extensions](https://learn.microsoft.com/en-us/azure/aks/cluster-extensions) - [Azure Arc-enabled Kubernetes](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/) - [Create Arc Extensions](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/extensions) @@ -692,6 +713,16 @@ az graph query -q " - [Azure Kubernetes Fleet Manager](https://learn.microsoft.com/en-us/azure/kubernetes-fleet/) - [Azure Resource 
Graph](https://learn.microsoft.com/en-us/azure/governance/resource-graph/) +### Example Extensions (Reference Implementations) + +| Extension | Type | Docs | +|-----------|------|------| +| Azure Monitor | `Microsoft.AzureMonitor.Containers` | [Container Insights](https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-enable-arc-enabled-clusters) | +| Azure Defender | `Microsoft.AzureDefender.Kubernetes` | [Defender for Containers](https://learn.microsoft.com/en-us/azure/defender-for-cloud/defender-for-kubernetes-azure-arc) | +| Azure ML | `Microsoft.AzureML.Kubernetes` | [ML on Arc](https://learn.microsoft.com/en-us/azure/machine-learning/how-to-attach-kubernetes-anywhere) | +| Flux | `Microsoft.Flux` | [GitOps with Flux](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/conceptual-gitops-flux2) | +| Dapr | `Microsoft.Dapr` | [Dapr extension](https://learn.microsoft.com/en-us/azure/aks/dapr) | + --- ## Appendix A: Azure Fleet Manager and Arc @@ -751,9 +782,9 @@ Fleet uses: | **GKE** (GCP) | ✅ Yes | Arc provides Azure connectivity | | **On-premises** | ✅ Yes | Arc provides Azure connectivity | -### Fleet vs Arc Extension: Different Use Cases +### Fleet vs K8s Extension: Different Use Cases -| Aspect | Arc Extension | Fleet Manager | +| Aspect | K8s Extension | Fleet Manager | |--------|---------------|---------------| | **Primary purpose** | Per-cluster app lifecycle | Multi-cluster orchestration | | **Installation trigger** | `az k8s-extension create` | ClusterResourcePlacement (CRP) | @@ -793,10 +824,10 @@ kubectl get clusterresourceplacement documentdb-operator \ | Customer Profile | Recommended Approach | |------------------|---------------------| -| Single cluster | Arc Extension (Option A or B) | +| Single cluster | K8s Extension (Option A or B) | | Multiple AKS clusters | Fleet Manager (no Arc agents needed) | | Multi-cloud (AKS + EKS/GKE) | Fleet + Arc agents on non-AKS clusters | -| Needs Azure Marketplace billing | 
Arc Extension (Option A) | +| Needs Azure Marketplace billing | K8s Extension (Option A) | | Enterprise with existing Fleet | Fleet CRPs for deployment, custom metering for billing | ### Existing Fleet Implementation From 0d834edd8654c2e25da540618de7df97b5fe52af Mon Sep 17 00:00:00 2001 From: Rayhan Hossain Date: Mon, 2 Mar 2026 10:34:27 -0800 Subject: [PATCH 3/4] Add playground example with Azure Arc and Fleet Signed-off-by: Rayhan Hossain --- .../azure-arc/azure-arc-integration-plan.md | 2 + .../AGENT-INSTRUCTIONS.md | 299 ++++++++++++++ .../arc-hybrid-setup-with-fleet/README.md | 383 ++++++++++++++++++ .../arc-hybrid-setup-with-fleet/cleanup.sh | 124 ++++++ .../deploy-documentdb-fleet.sh | 188 +++++++++ .../documentdb-instance.yaml | 18 + .../setup-arc-member.ps1 | 214 ++++++++++ .../setup-arc-member.sh | 200 +++++++++ .../setup-fleet-hub.ps1 | 150 +++++++ .../setup-fleet-hub.sh | 149 +++++++ .../verify-portal.sh | 123 ++++++ 11 files changed, 1850 insertions(+) create mode 100644 documentdb-playground/arc-hybrid-setup-with-fleet/AGENT-INSTRUCTIONS.md create mode 100644 documentdb-playground/arc-hybrid-setup-with-fleet/README.md create mode 100755 documentdb-playground/arc-hybrid-setup-with-fleet/cleanup.sh create mode 100755 documentdb-playground/arc-hybrid-setup-with-fleet/deploy-documentdb-fleet.sh create mode 100644 documentdb-playground/arc-hybrid-setup-with-fleet/documentdb-instance.yaml create mode 100644 documentdb-playground/arc-hybrid-setup-with-fleet/setup-arc-member.ps1 create mode 100755 documentdb-playground/arc-hybrid-setup-with-fleet/setup-arc-member.sh create mode 100644 documentdb-playground/arc-hybrid-setup-with-fleet/setup-fleet-hub.ps1 create mode 100755 documentdb-playground/arc-hybrid-setup-with-fleet/setup-fleet-hub.sh create mode 100755 documentdb-playground/arc-hybrid-setup-with-fleet/verify-portal.sh diff --git a/docs/designs/azure-arc/azure-arc-integration-plan.md b/docs/designs/azure-arc/azure-arc-integration-plan.md index 
d725c5d8..6f5b1b7a 100644 --- a/docs/designs/azure-arc/azure-arc-integration-plan.md +++ b/docs/designs/azure-arc/azure-arc-integration-plan.md @@ -10,6 +10,8 @@ This document outlines a two-phase plan to integrate DocumentDB Kubernetes Opera > **Note:** "Kubernetes extensions" (via `az k8s-extension`) are specific to Kubernetes clusters. Azure has separate extension mechanisms for VMs, Arc servers, etc. +> **Interim Solution:** For immediate portal visibility without extension registration or billing integration, see [arc-hybrid-setup-with-fleet](../../../documentdb-playground/arc-hybrid-setup-with-fleet/) which uses Azure Fleet Manager + direct Helm deployment. This provides cluster tracking today while the full integration is being developed. + ### Cluster Type Support | Cluster Type | Extension Cluster Type | Arc Agent Required? | diff --git a/documentdb-playground/arc-hybrid-setup-with-fleet/AGENT-INSTRUCTIONS.md b/documentdb-playground/arc-hybrid-setup-with-fleet/AGENT-INSTRUCTIONS.md new file mode 100644 index 00000000..d87bb99f --- /dev/null +++ b/documentdb-playground/arc-hybrid-setup-with-fleet/AGENT-INSTRUCTIONS.md @@ -0,0 +1,299 @@ +# Copilot Agent Instructions: DocumentDB Fleet Hybrid Setup + +## Overview + +This guide helps AI agents assist users in setting up DocumentDB on a hybrid environment using Azure Fleet Manager. The setup creates visibility in Azure Portal for both cloud (AKS) and on-premises (Arc-enabled) clusters. 
+ +**End Result:** +- Fleet Hub with 2 member clusters +- AKS cluster with DocumentDB operator + instance +- Kind cluster (simulating on-prem) with Arc agent, DocumentDB operator + instance +- Full visibility in Azure Portal + +**Estimated Time:** 30-45 minutes + +## Agent Behavior + +**CRITICAL:** This setup requires commands in **two different environments**: + +| Environment | Used For | Commands | +|-------------|----------|----------| +| **WSL/Linux** | Kind, Docker, kubectl, Helm | `kind`, `docker`, `kubectl`, `helm` | +| **PowerShell (Windows)** | Azure CLI | `az` | + +**Always tell the user which environment to run each command in.** + +> **Why dual environments?** In corporate environments with Conditional Access Policies, `az login` fails in WSL. PowerShell bypasses this limitation. + +--- + +## Environment Setup + +### WSL Terminal +```bash +# Verify tools +kubectl version --client +helm version --short +kind --version +docker ps +``` + +### PowerShell Terminal +```powershell +# Verify and install extensions +az --version # Must be 2.50.0+ +az extension add --name fleet --upgrade --yes +az extension add --name connectedk8s --upgrade --yes +az extension add --name k8s-extension --upgrade --yes +``` + +**KUBECONFIG Setup (PowerShell):** +```powershell +$env:KUBECONFIG = "\\wsl.localhost\Ubuntu\home\$env:USERNAME\.kube\config" +``` + +--- + +## Execution Instructions + +### Phase 1: Create Fleet Hub + AKS (PowerShell) + +**Tell user:** "Run these commands in PowerShell" + +```powershell +# Variables - user can customize these +$RESOURCE_GROUP = "documentdb-fleet-rg" +$LOCATION = "westus2" +$FLEET_NAME = "documentdb-fleet" +$AKS_CLUSTER = "documentdb-aks" + +# Create resource group +az group create --name $RESOURCE_GROUP --location $LOCATION + +# Create Fleet hub (hubless mode) +az fleet create --resource-group $RESOURCE_GROUP --name $FLEET_NAME --location $LOCATION + +# Create AKS cluster (~5-10 minutes) +az aks create ` + --resource-group $RESOURCE_GROUP ` 
+ --name $AKS_CLUSTER ` + --node-count 2 ` + --node-vm-size Standard_D4s_v3 ` + --enable-managed-identity ` + --generate-ssh-keys + +# Join AKS to Fleet +$AKS_ID = az aks show -g $RESOURCE_GROUP -n $AKS_CLUSTER --query id -o tsv +az fleet member create ` + --resource-group $RESOURCE_GROUP ` + --fleet-name $FLEET_NAME ` + --name $AKS_CLUSTER ` + --member-cluster-id $AKS_ID + +# Get AKS credentials to WSL kubeconfig +$env:KUBECONFIG = "\\wsl.localhost\Ubuntu\home\$env:USERNAME\.kube\config" +az aks get-credentials --resource-group $RESOURCE_GROUP --name $AKS_CLUSTER --overwrite-existing +``` + +**Success check:** `az fleet member list` shows `documentdb-aks` + +### Phase 2: Install cert-manager on AKS (WSL) + +**Tell user:** "Run these commands in WSL" + +```bash +kubectl config use-context documentdb-aks +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml +kubectl wait --for=condition=Available deployment --all -n cert-manager --timeout=300s +``` + +**Success check:** All cert-manager pods Running + +### Phase 3: Create Kind Cluster (WSL) + +**Tell user:** "Run these commands in WSL" + +```bash +kind create cluster --name documentdb-onprem --config - < **Note:** In corporate environments with Conditional Access Policies, `az login` may fail in WSL. Use PowerShell for all Azure CLI commands. 
+ +## Prerequisites + +### Required Versions + +| Tool | Minimum Version | Check Command | +|------|-----------------|---------------| +| Azure CLI | 2.50.0+ | `az --version` | +| kubectl | 1.26+ | `kubectl version --client` | +| Helm | 3.12+ | `helm version --short` | +| Kind | 0.20+ | `kind --version` | +| Docker | 20.10+ | `docker --version` | + +### Verify Prerequisites + +**In WSL:** +```bash +kubectl version --client +helm version --short +kind --version +docker ps +``` + +**In PowerShell:** +```powershell +az --version +az extension add --name fleet --upgrade --yes +az extension add --name connectedk8s --upgrade --yes +az extension add --name k8s-extension --upgrade --yes +``` + +## Quick Start + +### Option A: Step-by-Step (Recommended) + +Follow the detailed [Step-by-Step Guide](#step-by-step-guide) below. + +### Option B: Using Scripts + +**In WSL (for Kind/kubectl/Helm):** +```bash +cd documentdb-playground/arc-hybrid-setup-with-fleet +./setup-arc-member.sh # Creates Kind cluster +``` + +**In PowerShell (for Azure CLI):** +```powershell +cd \\wsl.localhost\Ubuntu\home\$env:USERNAME\path\to\documentdb-playground\arc-hybrid-setup-with-fleet +.\setup-fleet-hub.ps1 # Creates Fleet + AKS +.\setup-arc-member.ps1 # Arc-enables Kind + joins Fleet +``` + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────────────────┐ +│ Azure Portal │ +│ ┌──────────────────────────────────────────────────────────────────┐ │ +│ │ Azure Fleet Manager │ │ +│ │ • Fleet Hub (member management) │ │ +│ │ • AKS Member (managedClusters) ─── Portal visibility │ │ +│ │ • Arc Member (connectedClusters) ─ Portal visibility + token │ │ +│ └──────────────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────┘ + │ + Direct Helm deployment to each cluster + (hubless Fleet - no CRP propagation) + │ + ┌─────────────────────┴─────────────────────┐ + ▼ ▼ +┌───────────────────────┐ 
┌───────────────────────┐ +│ AKS Member Cluster │ │ Arc-Enabled Member │ +│ (Azure-managed) │ │ (On-Prem/Kind) │ +├───────────────────────┤ ├───────────────────────┤ +│ cert-manager │ │ cert-manager │ +│ DocumentDB Operator │ │ DocumentDB Operator │ +│ (deployed by Helm) │ │ Azure Arc Agents │ +├───────────────────────┤ ├───────────────────────┤ +│ DocumentDB Cluster │ │ DocumentDB Cluster │ +└───────────────────────┘ └───────────────────────┘ +``` + +## Step-by-Step Guide + +### Phase 1: Create Fleet Hub + AKS (PowerShell) + +```powershell +# Variables +$RESOURCE_GROUP = "documentdb-fleet-rg" +$LOCATION = "westus2" +$FLEET_NAME = "documentdb-fleet" +$AKS_CLUSTER = "documentdb-aks" + +# Create resource group +az group create --name $RESOURCE_GROUP --location $LOCATION + +# Create Fleet hub (hubless mode) +az fleet create --resource-group $RESOURCE_GROUP --name $FLEET_NAME --location $LOCATION + +# Create AKS cluster (~5-10 minutes) +az aks create ` + --resource-group $RESOURCE_GROUP ` + --name $AKS_CLUSTER ` + --node-count 2 ` + --node-vm-size Standard_D4s_v3 ` + --enable-managed-identity ` + --generate-ssh-keys + +# Join AKS to Fleet +$AKS_ID = az aks show -g $RESOURCE_GROUP -n $AKS_CLUSTER --query id -o tsv +az fleet member create ` + --resource-group $RESOURCE_GROUP ` + --fleet-name $FLEET_NAME ` + --name $AKS_CLUSTER ` + --member-cluster-id $AKS_ID + +# Get AKS credentials to WSL kubeconfig +$env:KUBECONFIG = "\\wsl.localhost\Ubuntu\home\$env:USERNAME\.kube\config" +az aks get-credentials --resource-group $RESOURCE_GROUP --name $AKS_CLUSTER --overwrite-existing +``` + +### Phase 2: Install cert-manager on AKS (WSL) + +```bash +kubectl config use-context documentdb-aks +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml +kubectl wait --for=condition=Available deployment --all -n cert-manager --timeout=300s +``` + +### Phase 3: Create Kind Cluster (WSL) + +```bash +kind create cluster --name documentdb-onprem 
--config - <<EOF
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+  - role: control-plane
+EOF
+```
+
+**Success check:** `kubectl get nodes` shows the Kind node `Ready`
+
+### Phase 4: Arc-Enable Kind Cluster and Join Fleet (PowerShell)
+
+```powershell
+az connectedk8s connect `
+  --name documentdb-onprem `
+  --resource-group $RESOURCE_GROUP `
+  --location $LOCATION
+
+$ARC_ID = az connectedk8s show -g $RESOURCE_GROUP -n documentdb-onprem --query id -o tsv
+az fleet member create `
+  --resource-group $RESOURCE_GROUP `
+  --fleet-name $FLEET_NAME `
+  --name documentdb-onprem `
+  --member-cluster-id $ARC_ID
+```
+
+**Success check:** `az connectedk8s show` reports `connectivityStatus: Connected`
+
+### Phase 5: Install cert-manager on Kind (WSL)
+
+```bash
+kubectl config use-context kind-documentdb-onprem
+kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
+kubectl wait --for=condition=Available deployment --all -n cert-manager --timeout=300s
+```
+
+### Phase 6: Deploy DocumentDB Operator on Both Clusters (WSL)
+
+```bash
+for ctx in documentdb-aks kind-documentdb-onprem; do
+  kubectl config use-context "$ctx"
+  helm install documentdb-operator oci://ghcr.io/documentdb/documentdb-helm-chart \
+    --namespace documentdb-operator --create-namespace \
+    --wait --timeout 5m
+done
+```
+
+### Phase 7: Create Service Account Token for Portal Access (WSL)
+
+```bash
+kubectl config use-context kind-documentdb-onprem
+kubectl create serviceaccount arc-portal-viewer -n default
+kubectl create clusterrolebinding arc-portal-viewer-binding \
+  --clusterrole=cluster-admin \
+  --serviceaccount=default:arc-portal-viewer
+kubectl create token arc-portal-viewer -n default --duration=8760h
+```
+
+> **Important:** Copy and save the token output - you'll need it for Azure Portal.
+
+### Phase 8: Deploy DocumentDB Instance (WSL)
+
+Deploy a DocumentDB instance on both clusters:
+
+```bash
+# Create namespace and credentials on Kind cluster
+kubectl config use-context kind-documentdb-onprem
+kubectl create namespace app-namespace
+kubectl create secret generic documentdb-credentials \
+  --namespace app-namespace \
+  --from-literal=username=docdbuser \
+  --from-literal=password=YourSecurePassword123!
+
+# Deploy DocumentDB instance
+kubectl apply -f documentdb-instance.yaml
+
+# Wait for pod to be ready
+kubectl get pods -n app-namespace -w
+# Wait until: demo-documentdb-1  2/2  Running
+
+# Verify DocumentDB is healthy
+kubectl get documentdb -n app-namespace
+# Expected: STATUS = "Cluster in healthy state"
+```
+
+Repeat for AKS cluster:
+
+```bash
+kubectl config use-context documentdb-aks
+kubectl create namespace app-namespace
+kubectl create secret generic documentdb-credentials \
+  --namespace app-namespace \
+  --from-literal=username=docdbuser \
+  --from-literal=password=YourSecurePassword123!
+
+kubectl apply -f documentdb-instance.yaml
+kubectl get pods -n app-namespace -w
+kubectl get documentdb -n app-namespace
+```
+
+### Phase 9: Verify in Azure Portal
+
+**AKS Cluster:**
+1. Navigate to: Azure Portal → AKS → `documentdb-aks` → **Workloads**
+2. Select namespace: `app-namespace`
+3. View `demo-documentdb-1` pod
+
+**Arc-Enabled Cluster:**
+1. Navigate to: Azure Portal → Arc → Kubernetes → `documentdb-onprem`
+2. Go to **Kubernetes resources** → **Workloads**
+3. Click **Sign in with service account token**
+4. Paste the token from Phase 7
+5. Select namespace: `app-namespace`
+6. View `demo-documentdb-1` pod
+
+**Fleet Manager:**
+1. Navigate to: Azure Portal → Fleet Manager → `documentdb-fleet`
+2. 
Go to **Members** to see both clusters + +## Success Criteria + +After completing all phases, verify: + +| Check | Expected Result | +|-------|-----------------| +| Fleet members | 2 clusters (documentdb-aks, documentdb-onprem) | +| Arc connectivity | `Connected` status | +| Operator pods | Running on both clusters | +| DocumentDB status | "Cluster in healthy state" on both | +| Portal visibility | Can see pods in both AKS and Arc portals | + +## Portal Links + +| Resource | URL | +|----------|-----| +| Fleet Hub | https://portal.azure.com/#view/Microsoft_Azure_Fleet/FleetMenuBlade | +| Arc Clusters | https://portal.azure.com/#view/Microsoft_Azure_HybridCompute/AzureArcCenterBlade/~/kubernetesServices | +| AKS Clusters | https://portal.azure.com/#view/HubsExtension/BrowseResource/resourceType/Microsoft.ContainerService%2FmanagedClusters | + +## Troubleshooting + +| Issue | Cause | Solution | +|-------|-------|----------| +| `az login` fails in WSL | Conditional Access Policy | Use PowerShell for all `az` commands | +| Kind context not found | kubeconfig not shared | Set `$env:KUBECONFIG` to WSL path in PowerShell | +| Arc CRD conflicts | Previous Arc install | Delete Kind cluster with `kind delete cluster --name documentdb-onprem` and recreate | +| Arc "token required" | Missing service account | Create token per Phase 7 | +| Pods not visible in Portal | Wrong namespace selected | Change namespace filter to `app-namespace` | +| Helm not found in PowerShell | Helm not in PATH | Run Helm commands from WSL only | +| AKS creation fails | Quota exceeded | Try different region or request quota increase | + +## Cleanup + +When you're done, clean up all resources: + +**In WSL:** +```bash +kind delete cluster --name documentdb-onprem +``` + +**In PowerShell:** +```powershell +# This deletes: Fleet hub, AKS cluster, Arc registration, all resources +az group delete --name documentdb-fleet-rg --yes --no-wait +``` + +## Files in This Directory + +| File | Purpose | 
+|------|---------| +| `README.md` | This guide | +| `AGENT-INSTRUCTIONS.md` | Instructions for Copilot agents | +| `documentdb-instance.yaml` | Sample DocumentDB CR for deployment | +| `setup-fleet-hub.ps1` | PowerShell script for Fleet + AKS setup | +| `setup-arc-member.ps1` | PowerShell script for Arc + Fleet join | +| `setup-arc-member.sh` | Bash script for Kind cluster creation | +| `cleanup.sh` | Cleanup script | + +## Related Documentation + +- [Multi-cluster replication](../multi-cloud-deployment/) - Cross-cluster DocumentDB replication +- [TLS configuration](../tls/) - Certificate setup options +- [Monitoring](../telemetry/) - OpenTelemetry and Prometheus integration +- [Azure Arc Integration Plan](../../docs/designs/azure-arc/azure-arc-integration-plan.md) - Full Azure product integration roadmap diff --git a/documentdb-playground/arc-hybrid-setup-with-fleet/cleanup.sh b/documentdb-playground/arc-hybrid-setup-with-fleet/cleanup.sh new file mode 100755 index 00000000..d39172f8 --- /dev/null +++ b/documentdb-playground/arc-hybrid-setup-with-fleet/cleanup.sh @@ -0,0 +1,124 @@ +#!/bin/bash +# Cleanup script for Fleet-based hybrid setup +# Removes Fleet hub, AKS, Arc clusters, and resources + +set -e + +# Load environment if available +[[ -f .fleet-env ]] && source .fleet-env + +# Configuration +RESOURCE_GROUP="${RESOURCE_GROUP:-documentdb-fleet-rg}" +FLEET_NAME="${FLEET_NAME:-documentdb-fleet}" +AKS_CLUSTER="${AKS_CLUSTER:-documentdb-aks}" +ARC_CLUSTER="${ARC_CLUSTER:-documentdb-onprem}" +FORCE="${FORCE:-false}" + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log() { echo -e "${BLUE}[$(date +'%H:%M:%S')]${NC} $1"; } +success() { echo -e "${GREEN}[$(date +'%H:%M:%S')] ✅ $1${NC}"; } +warn() { echo -e "${YELLOW}[$(date +'%H:%M:%S')] ⚠️ $1${NC}"; } + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --force|-f) + FORCE="true" + shift + ;; + --resource-group) + RESOURCE_GROUP="$2" + shift 2 + ;; 
+ -h|--help) + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Options:" + echo " --force, -f Skip confirmation prompts" + echo " --resource-group Resource group to clean up" + exit 0 + ;; + *) + echo "Unknown option: $1" + exit 1 + ;; + esac +done + +echo "" +echo "This will delete:" +echo " - Kind cluster: $ARC_CLUSTER" +echo " - Arc registration: $ARC_CLUSTER" +echo " - Fleet members (all)" +echo " - Fleet hub: $FLEET_NAME" +echo " - AKS cluster: $AKS_CLUSTER" +echo " - Resource group: $RESOURCE_GROUP (and all resources)" +echo "" + +if [[ "$FORCE" != "true" ]]; then + read -p "Are you sure? (y/N) " -n 1 -r + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + echo "Cancelled." + exit 0 + fi +fi + +# Delete Kind cluster +if command -v kind &>/dev/null; then + log "Deleting Kind cluster: $ARC_CLUSTER..." + kind delete cluster --name "$ARC_CLUSTER" 2>/dev/null || warn "Kind cluster not found" +fi + +# Delete Fleet members first +log "Deleting Fleet members..." +MEMBERS=$(az fleet member list --resource-group "$RESOURCE_GROUP" --fleet-name "$FLEET_NAME" --query "[].name" -o tsv 2>/dev/null) || true +for MEMBER in $MEMBERS; do + log "Deleting Fleet member: $MEMBER..." + az fleet member delete \ + --resource-group "$RESOURCE_GROUP" \ + --fleet-name "$FLEET_NAME" \ + --name "$MEMBER" \ + --yes 2>/dev/null || warn "Fleet member $MEMBER not found" +done + +# Delete Arc registration +log "Deleting Arc cluster registration..." +az connectedk8s delete \ + --name "$ARC_CLUSTER" \ + --resource-group "$RESOURCE_GROUP" \ + --yes 2>/dev/null || warn "Arc cluster not found" + +# Delete Fleet hub +log "Deleting Fleet hub: $FLEET_NAME..." +az fleet delete \ + --resource-group "$RESOURCE_GROUP" \ + --name "$FLEET_NAME" \ + --yes 2>/dev/null || warn "Fleet hub not found" + +# Delete AKS cluster +log "Deleting AKS cluster: $AKS_CLUSTER..." 
+az aks delete \ + --name "$AKS_CLUSTER" \ + --resource-group "$RESOURCE_GROUP" \ + --yes --no-wait 2>/dev/null || warn "AKS cluster not found" + +# Delete resource group +log "Deleting resource group: $RESOURCE_GROUP..." +az group delete \ + --name "$RESOURCE_GROUP" \ + --yes --no-wait 2>/dev/null || warn "Resource group not found" + +# Clean up local files +rm -f .fleet-env 2>/dev/null || true + +success "Cleanup initiated!" +echo "" +echo "Note: AKS and resource group deletion runs in background." +echo "Check Azure Portal to confirm completion." diff --git a/documentdb-playground/arc-hybrid-setup-with-fleet/deploy-documentdb-fleet.sh b/documentdb-playground/arc-hybrid-setup-with-fleet/deploy-documentdb-fleet.sh new file mode 100755 index 00000000..bf2a47fe --- /dev/null +++ b/documentdb-playground/arc-hybrid-setup-with-fleet/deploy-documentdb-fleet.sh @@ -0,0 +1,188 @@ +#!/bin/bash +# Deploy DocumentDB to all Fleet member clusters +# Uses vanilla Helm deployment (no extension registration needed) + +set -e + +# Load environment from previous scripts if available +[[ -f .fleet-env ]] && source .fleet-env + +# Configuration +RESOURCE_GROUP="${RESOURCE_GROUP:-documentdb-fleet-rg}" +FLEET_NAME="${FLEET_NAME:-documentdb-fleet}" +AKS_CLUSTER="${AKS_CLUSTER:-documentdb-aks}" +ARC_CLUSTER="${ARC_CLUSTER:-documentdb-onprem}" +DEPLOY_INSTANCES="${DEPLOY_INSTANCES:-true}" + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log() { echo -e "${BLUE}[$(date +'%H:%M:%S')]${NC} $1"; } +success() { echo -e "${GREEN}[$(date +'%H:%M:%S')] ✅ $1${NC}"; } +warn() { echo -e "${YELLOW}[$(date +'%H:%M:%S')] ⚠️ $1${NC}"; } +error() { echo -e "${RED}[$(date +'%H:%M:%S')] ❌ $1${NC}"; exit 1; } + +# Help +if [[ "$1" == "-h" || "$1" == "--help" ]]; then + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Deploys DocumentDB operator to all Fleet member clusters" + echo "" + echo "Environment variables:" + echo " RESOURCE_GROUP Azure resource 
group" + echo " FLEET_NAME Fleet hub name" + echo " AKS_CLUSTER AKS cluster name" + echo " ARC_CLUSTER Arc cluster name" + echo " DEPLOY_INSTANCES Deploy DocumentDB instances (default: true)" + exit 0 +fi + +# Check prerequisites +log "Checking prerequisites..." +command -v az &> /dev/null || error "Azure CLI not found" +command -v kubectl &> /dev/null || error "kubectl not found" +command -v helm &> /dev/null || error "Helm not found" + +# Check Azure login +if ! az account show &> /dev/null; then + error "Not logged into Azure. Run 'az login' first." +fi + +# Get Fleet members +log "Getting Fleet members..." +MEMBERS=$(az fleet member list --resource-group "$RESOURCE_GROUP" --fleet-name "$FLEET_NAME" --query "[].name" -o tsv) +if [[ -z "$MEMBERS" ]]; then + error "No Fleet members found. Run setup-fleet-hub.sh and setup-arc-member.sh first." +fi + +echo "Fleet members found:" +echo "$MEMBERS" | while read -r member; do echo " - $member"; done + +# Deploy DocumentDB operator to each member +log "Deploying DocumentDB operator to all Fleet members..." + +for MEMBER in $MEMBERS; do + echo "" + log "=== Deploying to: $MEMBER ===" + + # Get credentials based on cluster type + if az aks show -g "$RESOURCE_GROUP" -n "$MEMBER" &>/dev/null; then + log "Getting AKS credentials for $MEMBER..." + az aks get-credentials -g "$RESOURCE_GROUP" -n "$MEMBER" --overwrite-existing + elif az connectedk8s show -g "$RESOURCE_GROUP" -n "$MEMBER" &>/dev/null; then + log "Using Kind context for $MEMBER..." + kubectl config use-context "kind-$MEMBER" 2>/dev/null || \ + kubectl config use-context "$MEMBER" 2>/dev/null || \ + warn "Could not switch context for $MEMBER - ensure kubeconfig is set" + else + warn "Unknown cluster type for $MEMBER, skipping..." + continue + fi + + # Verify connectivity + if ! kubectl cluster-info &>/dev/null; then + warn "Cannot connect to $MEMBER, skipping..." 
+    continue
+  fi
+
+  # Check if operator already installed
+  if helm list -n documentdb-operator 2>/dev/null | grep -q documentdb-operator; then
+    log "DocumentDB operator already installed on $MEMBER, upgrading..."
+    helm upgrade documentdb-operator oci://ghcr.io/documentdb/documentdb-helm-chart \
+      --namespace documentdb-operator \
+      --wait --timeout 5m
+  else
+    log "Installing DocumentDB operator on $MEMBER..."
+    helm install documentdb-operator oci://ghcr.io/documentdb/documentdb-helm-chart \
+      --namespace documentdb-operator --create-namespace \
+      --wait --timeout 5m
+  fi
+  success "DocumentDB operator ready on $MEMBER"
+done
+
+# Deploy DocumentDB instances if requested
+if [[ "$DEPLOY_INSTANCES" == "true" ]]; then
+  echo ""
+  log "Deploying DocumentDB instances..."
+
+  for MEMBER in $MEMBERS; do
+    log "=== Deploying instance on: $MEMBER ==="
+
+    # Get credentials
+    if az aks show -g "$RESOURCE_GROUP" -n "$MEMBER" &>/dev/null; then
+      az aks get-credentials -g "$RESOURCE_GROUP" -n "$MEMBER" --overwrite-existing
+    else
+      kubectl config use-context "kind-$MEMBER" 2>/dev/null || \
+        kubectl config use-context "$MEMBER" 2>/dev/null || continue
+    fi
+
+    # Check if instance exists
+    if kubectl get documentdb "documentdb-$MEMBER" &>/dev/null; then
+      log "DocumentDB instance already exists on $MEMBER"
+      continue
+    fi
+
+    # Deploy instance
+    # NOTE(review): heredoc reconstructed from documentdb-instance.yaml — verify against original
+    kubectl apply -f - <<EOF
+apiVersion: documentdb.io/preview
+kind: DocumentDB
+metadata:
+  name: documentdb-$MEMBER
+spec:
+  nodeCount: 1
+  instancesPerNode: 1
+  documentDBImage: ghcr.io/microsoft/documentdb/documentdb-local:16
+  gatewayImage: ghcr.io/microsoft/documentdb/documentdb-local:16
+  documentDbCredentialSecret: documentdb-credentials
+  resource:
+    storage:
+      pvcSize: 5Gi
+  exposeViaService:
+    serviceType: ClusterIP
+EOF
+    success "DocumentDB instance created on $MEMBER"
+  done
+fi
+
+# Verify deployment status on each member
+echo ""
+log "Verifying deployments..."
+for MEMBER in $MEMBERS; do
+  echo ""
+  log "=== Status: $MEMBER ==="
+
+  # Get credentials
+  if az aks show -g "$RESOURCE_GROUP" -n "$MEMBER" &>/dev/null; then
+    az aks get-credentials -g "$RESOURCE_GROUP" -n "$MEMBER" --overwrite-existing 2>/dev/null
+  else
+    kubectl config use-context "kind-$MEMBER" 2>/dev/null || \
+      kubectl config use-context "$MEMBER" 2>/dev/null || continue
+  fi
+
+  echo "Operator pods:"
+  kubectl get pods -n documentdb-operator --no-headers 2>/dev/null || echo "  (no pods)"
+
+  echo "DocumentDB instances:"
+  kubectl get documentdb -A --no-headers 2>/dev/null || echo "  (no instances)"
+  echo ""
+done
+
+# Summary
+echo "=============================================="
+success "DocumentDB Deployment Complete!"
+echo "==============================================" +echo "" +echo "All Fleet members now have:" +echo " ✅ DocumentDB operator installed" +if [[ "$DEPLOY_INSTANCES" == "true" ]]; then + echo " ✅ DocumentDB cluster instance running" +fi +echo "" +echo "View in Azure Portal:" +echo " Fleet Manager -> $FLEET_NAME -> Members" +echo "" +echo "No extension registration was required!" +echo "" diff --git a/documentdb-playground/arc-hybrid-setup-with-fleet/documentdb-instance.yaml b/documentdb-playground/arc-hybrid-setup-with-fleet/documentdb-instance.yaml new file mode 100644 index 00000000..79d968f1 --- /dev/null +++ b/documentdb-playground/arc-hybrid-setup-with-fleet/documentdb-instance.yaml @@ -0,0 +1,18 @@ +apiVersion: documentdb.io/preview +kind: DocumentDB +metadata: + name: demo-documentdb + namespace: app-namespace +spec: + nodeCount: 1 + instancesPerNode: 1 + documentDBImage: ghcr.io/microsoft/documentdb/documentdb-local:16 + gatewayImage: ghcr.io/microsoft/documentdb/documentdb-local:16 + documentDbCredentialSecret: documentdb-credentials + resource: + storage: + pvcSize: 5Gi + exposeViaService: + serviceType: ClusterIP + logLevel: info + sidecarInjectorPluginName: cnpg-i-sidecar-injector.documentdb.io diff --git a/documentdb-playground/arc-hybrid-setup-with-fleet/setup-arc-member.ps1 b/documentdb-playground/arc-hybrid-setup-with-fleet/setup-arc-member.ps1 new file mode 100644 index 00000000..a572275c --- /dev/null +++ b/documentdb-playground/arc-hybrid-setup-with-fleet/setup-arc-member.ps1 @@ -0,0 +1,214 @@ +# Setup Arc-enabled on-prem cluster and join to Fleet +# Creates Kind cluster (in WSL), Arc-enables it, and joins to existing Fleet +# PowerShell version for Azure CLI commands + +param( + [string]$ResourceGroup = "documentdb-fleet-rg", + [string]$Location = "westus2", + [string]$FleetName = "documentdb-fleet", + [string]$ArcCluster = "documentdb-onprem", + [switch]$Help +) + +# Colors +function Write-Log { param([string]$Message) Write-Host "[$(Get-Date 
-Format 'HH:mm:ss')] $Message" -ForegroundColor Cyan } +function Write-Success { param([string]$Message) Write-Host "[$(Get-Date -Format 'HH:mm:ss')] ✅ $Message" -ForegroundColor Green } +function Write-Warn { param([string]$Message) Write-Host "[$(Get-Date -Format 'HH:mm:ss')] ⚠️ $Message" -ForegroundColor Yellow } +function Write-Err { param([string]$Message) Write-Host "[$(Get-Date -Format 'HH:mm:ss')] ❌ $Message" -ForegroundColor Red; exit 1 } + +if ($Help) { + Write-Host @" +Usage: .\setup-arc-member.ps1 [OPTIONS] + +Arc-enables a Kind cluster and joins it to Fleet. + +IMPORTANT: Run Kind cluster creation in WSL first: + kind create cluster --name documentdb-onprem + +Parameters: + -ResourceGroup Azure resource group (default: documentdb-fleet-rg) + -Location Azure region (default: westus2) + -FleetName Fleet hub name (default: documentdb-fleet) + -ArcCluster Arc cluster name (default: documentdb-onprem) + -Help Show this help message +"@ + exit 0 +} + +Write-Host "" +Write-Host "==============================================" +Write-Host "Arc-Enabled Cluster Setup (PowerShell)" +Write-Host "==============================================" +Write-Host "" + +# Set KUBECONFIG to WSL path +$WslUser = $env:USERNAME +$env:KUBECONFIG = "\\wsl.localhost\Ubuntu\home\$WslUser\.kube\config" +Write-Log "Using WSL kubeconfig: $env:KUBECONFIG" + +# Check prerequisites +Write-Log "Checking prerequisites..." +if (-not (Get-Command az -ErrorAction SilentlyContinue)) { Write-Err "Azure CLI not found" } +if (-not (Get-Command kubectl -ErrorAction SilentlyContinue)) { Write-Err "kubectl not found" } + +# Check Azure login +try { + $null = az account show 2>$null + if ($LASTEXITCODE -ne 0) { throw } +} catch { + Write-Err "Not logged into Azure. Run 'az login' first." +} + +# Install connectedk8s extension +Write-Log "Checking Azure CLI extensions..." 
+az extension add --name connectedk8s --upgrade --yes 2>$null
+az extension add --name fleet --upgrade --yes 2>$null
+
+# Verify kubectl context
+Write-Log "Verifying kubeconfig context..."
+$currentContext = kubectl config current-context 2>$null
+Write-Log "Current context: $currentContext"
+
+# Check if Kind cluster exists and we can connect
+$nodes = kubectl get nodes -o name 2>$null
+if (-not $nodes) {
+    Write-Host ""
+    Write-Warn "Cannot connect to Kubernetes cluster!"
+    Write-Host ""
+    Write-Host "Please run this in WSL first:" -ForegroundColor Yellow
+    Write-Host ""
+    Write-Host "  kind create cluster --name $ArcCluster" -ForegroundColor Cyan
+    Write-Host ""
+    exit 1
+}
+
+# Switch to the Kind cluster context
+# NOTE(review): reconstructed span — verify against original
+Write-Log "Switching to Kind context..."
+kubectl config use-context "kind-$ArcCluster" 2>$null
+if ($LASTEXITCODE -ne 0) {
+    Write-Warn "Could not switch context. Proceeding with current context: $currentContext"
+}
+
+# Verify Fleet exists
+Write-Log "Verifying Fleet hub exists..."
+$fleetCheck = az fleet show --resource-group $ResourceGroup --name $FleetName 2>$null
+if ($LASTEXITCODE -ne 0) {
+    Write-Err "Fleet hub '$FleetName' not found. Run setup-fleet-hub.ps1 first."
+}
+Write-Success "Fleet hub found"
+
+# Check if Arc cluster already exists
+Write-Log "Checking for existing Arc cluster..."
+$existingArc = az connectedk8s show -g $ResourceGroup -n $ArcCluster 2>$null
+if ($LASTEXITCODE -eq 0) {
+    Write-Warn "Arc cluster '$ArcCluster' already exists. Delete it first if you want to recreate."
+    Write-Host "To delete: az connectedk8s delete --name $ArcCluster --resource-group $ResourceGroup --yes"
+
+    $connectivityStatus = az connectedk8s show -g $ResourceGroup -n $ArcCluster --query connectivityStatus -o tsv
+    if ($connectivityStatus -eq "Connected") {
+        Write-Success "Arc cluster is connected. Skipping Arc-enable step."
+    } else {
+        Write-Err "Arc cluster exists but is not connected. Delete and recreate."
+    }
+} else {
+    # Arc-enable the cluster
+    Write-Log "Arc-enabling cluster (this takes 2-3 minutes)..."
+ az connectedk8s connect ` + --name $ArcCluster ` + --resource-group $ResourceGroup ` + --location $Location ` + --tags "environment=onprem" "purpose=documentdb" "fleet=$FleetName" "cluster-type=kind" + + if ($LASTEXITCODE -ne 0) { Write-Err "Failed to Arc-enable cluster" } + Write-Success "Cluster Arc-enabled" +} + +# Verify Arc connection +Write-Log "Verifying Arc connection..." +az connectedk8s show --name $ArcCluster --resource-group $ResourceGroup ` + --query "{name:name, connectivityStatus:connectivityStatus, kubernetesVersion:kubernetesVersion}" -o table + +# Check if already a Fleet member +Write-Log "Checking Fleet membership..." +$existingMember = az fleet member show --resource-group $ResourceGroup --fleet-name $FleetName --name $ArcCluster 2>$null +if ($LASTEXITCODE -eq 0) { + Write-Success "Already a Fleet member" +} else { + # Join Arc cluster to Fleet + Write-Log "Joining Arc cluster to Fleet..." + $ArcId = az connectedk8s show -g $ResourceGroup -n $ArcCluster --query id -o tsv + az fleet member create ` + --resource-group $ResourceGroup ` + --fleet-name $FleetName ` + --name $ArcCluster ` + --member-cluster-id $ArcId + + if ($LASTEXITCODE -ne 0) { Write-Err "Failed to join Arc cluster to Fleet" } + Write-Success "Arc cluster joined to Fleet" +} + +# Show Arc agent pods +Write-Log "Arc agent pods:" +kubectl get pods -n azure-arc + +# Create service account for portal viewing +Write-Log "Creating service account for Azure Portal access..." +kubectl create serviceaccount arc-portal-viewer -n default 2>$null +kubectl create clusterrolebinding arc-portal-viewer-binding ` + --clusterrole=cluster-admin ` + --serviceaccount=default:arc-portal-viewer 2>$null + +Write-Log "Generating bearer token for Azure Portal..." +$BearerToken = kubectl create token arc-portal-viewer -n default --duration=8760h + +# Summary +Write-Host "" +Write-Host "==============================================" +Write-Success "Arc-Enabled Member Setup Complete!" 
+Write-Host "==============================================" +Write-Host "" +Write-Host "Cluster Details:" +Write-Host " Resource Group: $ResourceGroup" +Write-Host " Fleet Hub: $FleetName" +Write-Host " Arc Cluster Name: $ArcCluster" +Write-Host " Azure Location: $Location" +Write-Host "" +Write-Host "Fleet Members:" +az fleet member list --resource-group $ResourceGroup --fleet-name $FleetName -o table +Write-Host "" +$SubscriptionId = az account show --query id -o tsv +Write-Host "Azure Portal Links:" +Write-Host " Arc Cluster: https://portal.azure.com/#@/resource/subscriptions/$SubscriptionId/resourceGroups/$ResourceGroup/providers/Microsoft.Kubernetes/connectedClusters/$ArcCluster/overview" +Write-Host "" +Write-Host "==============================================" +Write-Host "BEARER TOKEN FOR AZURE PORTAL" -ForegroundColor Yellow +Write-Host "==============================================" +Write-Host "Use this token in Azure Portal to view Kubernetes resources:" +Write-Host "1. Go to Arc cluster -> Kubernetes resources" +Write-Host "2. Click 'Sign in with service account token'" +Write-Host "3. Paste this token:" +Write-Host "" +Write-Host $BearerToken -ForegroundColor Green +Write-Host "" +Write-Host "==============================================" +Write-Host "" +Write-Host "Next steps (run in WSL):" +Write-Host "1. Install cert-manager: kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml" +Write-Host "2. 
Deploy DocumentDB: helm install documentdb-operator ./operator/documentdb-helm-chart --namespace documentdb-operator --create-namespace" +Write-Host "" + +# Save token to file +$BearerToken | Out-File -FilePath ".arc-portal-token.txt" -Encoding UTF8 +Write-Log "Token saved to .arc-portal-token.txt" diff --git a/documentdb-playground/arc-hybrid-setup-with-fleet/setup-arc-member.sh b/documentdb-playground/arc-hybrid-setup-with-fleet/setup-arc-member.sh new file mode 100755 index 00000000..cb94185a --- /dev/null +++ b/documentdb-playground/arc-hybrid-setup-with-fleet/setup-arc-member.sh @@ -0,0 +1,200 @@ +#!/bin/bash +# Setup Arc-enabled on-prem cluster and join to Fleet +# Creates Kind cluster, Arc-enables it, and joins to existing Fleet + +set -e + +# Load environment from previous script if available +[[ -f .fleet-env ]] && source .fleet-env + +# Configuration (can be overridden via environment variables) +RESOURCE_GROUP="${RESOURCE_GROUP:-documentdb-fleet-rg}" +LOCATION="${LOCATION:-eastus}" +FLEET_NAME="${FLEET_NAME:-documentdb-fleet}" +ARC_CLUSTER="${ARC_CLUSTER:-documentdb-onprem}" +CLUSTER_TYPE="${CLUSTER_TYPE:-kind}" # kind, k3d, or existing + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log() { echo -e "${BLUE}[$(date +'%H:%M:%S')]${NC} $1"; } +success() { echo -e "${GREEN}[$(date +'%H:%M:%S')] ✅ $1${NC}"; } +warn() { echo -e "${YELLOW}[$(date +'%H:%M:%S')] ⚠️ $1${NC}"; } +error() { echo -e "${RED}[$(date +'%H:%M:%S')] ❌ $1${NC}"; exit 1; } + +# Help +if [[ "$1" == "-h" || "$1" == "--help" ]]; then + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Creates Arc-enabled on-prem cluster and joins to Fleet" + echo "" + echo "Environment variables:" + echo " RESOURCE_GROUP Azure resource group (default: documentdb-fleet-rg)" + echo " LOCATION Azure region for Arc metadata (default: eastus)" + echo " FLEET_NAME Fleet hub name (default: documentdb-fleet)" + echo " ARC_CLUSTER Arc cluster name (default: 
documentdb-onprem)"
+  echo "  CLUSTER_TYPE    kind, k3d, or existing (default: kind)"
+  exit 0
+fi
+
+# Check prerequisites
+log "Checking prerequisites..."
+command -v az &> /dev/null || error "Azure CLI not found"
+command -v kubectl &> /dev/null || error "kubectl not found"
+command -v helm &> /dev/null || error "Helm not found"
+
+if [[ "$CLUSTER_TYPE" == "kind" ]]; then
+  command -v kind &> /dev/null || error "Kind not found. Install: https://kind.sigs.k8s.io/docs/user/quick-start/"
+  command -v docker &> /dev/null || error "Docker not found"
+fi
+
+# Check Azure login
+if ! az account show &> /dev/null; then
+  error "Not logged into Azure. Run 'az login' first."
+fi
+
+# Install required extensions
+log "Checking Azure CLI extensions..."
+az extension add --name connectedk8s --upgrade --yes 2>/dev/null || true
+az extension add --name fleet --upgrade --yes 2>/dev/null || true
+
+# Register providers
+log "Registering Azure providers (if needed)..."
+az provider register --namespace Microsoft.Kubernetes --wait 2>/dev/null || true
+az provider register --namespace Microsoft.KubernetesConfiguration --wait 2>/dev/null || true
+
+# Verify Fleet exists
+log "Verifying Fleet hub exists..."
+if ! az fleet show --resource-group "$RESOURCE_GROUP" --name "$FLEET_NAME" &>/dev/null; then
+  error "Fleet hub '$FLEET_NAME' not found. Run ./setup-fleet-hub.sh first."
+fi
+success "Fleet hub found"
+
+# Create local cluster
+if [[ "$CLUSTER_TYPE" == "kind" ]]; then
+  log "Creating Kind cluster: $ARC_CLUSTER..."
+
+  # Delete existing if present
+  kind delete cluster --name "$ARC_CLUSTER" 2>/dev/null || true
+
+  # NOTE(review): Kind config heredoc reconstructed — verify against original
+  kind create cluster --name "$ARC_CLUSTER" --config - <<EOF
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+  - role: control-plane
+EOF
+  success "Kind cluster created"
+
+elif [[ "$CLUSTER_TYPE" == "k3d" ]]; then
+  log "Creating k3d cluster: $ARC_CLUSTER..."
+  command -v k3d &> /dev/null || error "k3d not found"
+
+  # Delete existing if present
+  k3d cluster delete "$ARC_CLUSTER" 2>/dev/null || true
+  k3d cluster create "$ARC_CLUSTER" --agents 2
+  success "k3d cluster created"
+
+elif [[ "$CLUSTER_TYPE" == "existing" ]]; then
+  log "Using existing cluster context..."
+ kubectl cluster-info || error "Cannot connect to existing cluster" +else + error "Unknown CLUSTER_TYPE: $CLUSTER_TYPE" +fi + +# Verify cluster +log "Verifying cluster connectivity..." +kubectl cluster-info +kubectl get nodes + +# Arc-enable the cluster +log "Arc-enabling cluster (connecting to Azure)..." +az connectedk8s connect \ + --name "$ARC_CLUSTER" \ + --resource-group "$RESOURCE_GROUP" \ + --location "$LOCATION" \ + --tags environment=onprem purpose=documentdb fleet="$FLEET_NAME" cluster-type="$CLUSTER_TYPE" +success "Cluster Arc-enabled" + +# Verify Arc connection +log "Verifying Arc connection..." +az connectedk8s show --name "$ARC_CLUSTER" --resource-group "$RESOURCE_GROUP" \ + --query "{name:name, connectivityStatus:connectivityStatus}" -o table + +# Join Arc cluster to Fleet +log "Joining Arc cluster to Fleet..." +ARC_ID=$(az connectedk8s show -g "$RESOURCE_GROUP" -n "$ARC_CLUSTER" --query id -o tsv) +az fleet member create \ + --resource-group "$RESOURCE_GROUP" \ + --fleet-name "$FLEET_NAME" \ + --name "$ARC_CLUSTER" \ + --member-cluster-id "$ARC_ID" \ + --output none +success "Arc cluster joined to Fleet" + +# Install cert-manager on Arc cluster +log "Installing cert-manager on Arc cluster..." +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml +log "Waiting for cert-manager to be ready..." +kubectl wait --for=condition=Available deployment --all -n cert-manager --timeout=300s +success "cert-manager installed" + +# Show Arc agent pods +log "Arc agent pods:" +kubectl get pods -n azure-arc + +# Create service account for Azure Portal viewing +log "Creating service account for Azure Portal access..." 
+kubectl create serviceaccount arc-portal-viewer -n default 2>/dev/null || true +kubectl create clusterrolebinding arc-portal-viewer-binding \ + --clusterrole=cluster-admin \ + --serviceaccount=default:arc-portal-viewer 2>/dev/null || true + +log "Generating bearer token for Azure Portal (valid for 1 year)..." +BEARER_TOKEN=$(kubectl create token arc-portal-viewer -n default --duration=8760h) +success "Bearer token generated" + +# Summary +echo "" +echo "==============================================" +success "Arc-Enabled Member Setup Complete!" +echo "==============================================" +echo "" +echo "Cluster Details:" +echo " Resource Group: $RESOURCE_GROUP" +echo " Fleet Hub: $FLEET_NAME" +echo " Arc Cluster Name: $ARC_CLUSTER" +echo " Cluster Type: $CLUSTER_TYPE" +echo " Azure Location: $LOCATION (metadata only)" +echo "" +echo "Fleet Members:" +az fleet member list --resource-group "$RESOURCE_GROUP" --fleet-name "$FLEET_NAME" -o table +echo "" +echo "Azure Portal Links:" +SUBSCRIPTION_ID=$(az account show --query id -o tsv) +echo " Arc Cluster: https://portal.azure.com/#@/resource/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.Kubernetes/connectedClusters/$ARC_CLUSTER/overview" +echo "" +echo "==============================================" +echo "BEARER TOKEN FOR AZURE PORTAL" +echo "==============================================" +echo "Use this token in Azure Portal to view Kubernetes resources:" +echo "1. Go to Arc cluster -> Kubernetes resources" +echo "2. Click 'Sign in with service account token'" +echo "3. 
Paste this token:" +echo "" +echo "$BEARER_TOKEN" +echo "" +echo "==============================================" +echo "" +echo "Next step: Run ./deploy-documentdb-fleet.sh to deploy DocumentDB" +echo "" + +# Update environment file +echo "export ARC_CLUSTER=$ARC_CLUSTER" >> .fleet-env +echo "export BEARER_TOKEN='$BEARER_TOKEN'" >> .fleet-env + diff --git a/documentdb-playground/arc-hybrid-setup-with-fleet/setup-fleet-hub.ps1 b/documentdb-playground/arc-hybrid-setup-with-fleet/setup-fleet-hub.ps1 new file mode 100644 index 00000000..4b9a036c --- /dev/null +++ b/documentdb-playground/arc-hybrid-setup-with-fleet/setup-fleet-hub.ps1 @@ -0,0 +1,150 @@ +# Setup Azure Fleet Manager hub + AKS member cluster +# Creates Fleet hub and AKS cluster, then joins AKS to Fleet +# PowerShell version for Windows + +param( + [string]$ResourceGroup = "documentdb-fleet-rg", + [string]$Location = "eastus", + [string]$FleetName = "documentdb-fleet", + [string]$AksCluster = "documentdb-aks", + [int]$NodeCount = 2, + [string]$NodeSize = "Standard_D4s_v3", + [switch]$Help +) + +# Colors +function Write-Log { param([string]$Message) Write-Host "[$(Get-Date -Format 'HH:mm:ss')] $Message" -ForegroundColor Cyan } +function Write-Success { param([string]$Message) Write-Host "[$(Get-Date -Format 'HH:mm:ss')] ✅ $Message" -ForegroundColor Green } +function Write-Warn { param([string]$Message) Write-Host "[$(Get-Date -Format 'HH:mm:ss')] ⚠️ $Message" -ForegroundColor Yellow } +function Write-Err { param([string]$Message) Write-Host "[$(Get-Date -Format 'HH:mm:ss')] ❌ $Message" -ForegroundColor Red; exit 1 } + +if ($Help) { + Write-Host @" +Usage: .\setup-fleet-hub.ps1 [OPTIONS] + +Creates Azure Fleet Manager hub and AKS member cluster + +Parameters: + -ResourceGroup Azure resource group (default: documentdb-fleet-rg) + -Location Azure region (default: eastus) + -FleetName Fleet hub name (default: documentdb-fleet) + -AksCluster AKS cluster name (default: documentdb-aks) + -NodeCount Number of nodes 
(default: 2) + -NodeSize VM size (default: Standard_D4s_v3) + -Help Show this help message +"@ + exit 0 +} + +# Check prerequisites +Write-Log "Checking prerequisites..." +if (-not (Get-Command az -ErrorAction SilentlyContinue)) { Write-Err "Azure CLI not found" } +if (-not (Get-Command kubectl -ErrorAction SilentlyContinue)) { Write-Err "kubectl not found" } +if (-not (Get-Command helm -ErrorAction SilentlyContinue)) { Write-Err "Helm not found" } + +# Check Azure login +try { + $null = az account show 2>$null + if ($LASTEXITCODE -ne 0) { throw } +} catch { + Write-Err "Not logged into Azure. Run 'az login' first." +} + +# Install Fleet extension +Write-Log "Checking Azure CLI Fleet extension..." +az extension add --name fleet --upgrade --yes 2>$null + +$Subscription = az account show --query name -o tsv +Write-Log "Using Azure subscription: $Subscription" + +# Create resource group +Write-Log "Creating resource group: $ResourceGroup in $Location..." +az group create --name $ResourceGroup --location $Location --output none +if ($LASTEXITCODE -ne 0) { Write-Err "Failed to create resource group" } +Write-Success "Resource group created" + +# Create Fleet hub +Write-Log "Creating Azure Fleet Manager hub: $FleetName..." +az fleet create --resource-group $ResourceGroup --name $FleetName --location $Location --output none +if ($LASTEXITCODE -ne 0) { Write-Err "Failed to create Fleet hub" } +Write-Success "Fleet hub created" + +# Create AKS cluster +Write-Log "Creating AKS cluster: $AksCluster (this takes ~5-10 minutes)..." 
+az aks create ` + --resource-group $ResourceGroup ` + --name $AksCluster ` + --node-count $NodeCount ` + --node-vm-size $NodeSize ` + --enable-managed-identity ` + --generate-ssh-keys ` + --tags purpose=documentdb environment=aks fleet=$FleetName ` + --output none +if ($LASTEXITCODE -ne 0) { Write-Err "Failed to create AKS cluster" } +Write-Success "AKS cluster created" + +# Wait for AKS to be fully ready +Write-Log "Waiting for AKS cluster to be ready..." +do { + Start-Sleep -Seconds 10 + $state = az aks show -g $ResourceGroup -n $AksCluster --query provisioningState -o tsv + Write-Log "AKS state: $state" +} while ($state -eq "Updating") + +# Join AKS to Fleet +Write-Log "Joining AKS cluster to Fleet..." +$AksId = az aks show -g $ResourceGroup -n $AksCluster --query id -o tsv +az fleet member create ` + --resource-group $ResourceGroup ` + --fleet-name $FleetName ` + --name $AksCluster ` + --member-cluster-id $AksId ` + --output none +if ($LASTEXITCODE -ne 0) { Write-Err "Failed to join AKS to Fleet" } +Write-Success "AKS joined to Fleet" + +# Get AKS credentials +Write-Log "Getting AKS cluster credentials..." +az aks get-credentials --resource-group $ResourceGroup --name $AksCluster --overwrite-existing + +# Verify connectivity +Write-Log "Verifying AKS cluster connectivity..." +kubectl cluster-info +kubectl get nodes + +# Install cert-manager on AKS +Write-Log "Installing cert-manager on AKS..." +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml +Write-Log "Waiting for cert-manager to be ready..." +kubectl wait --for=condition=Available deployment --all -n cert-manager --timeout=300s +Write-Success "cert-manager installed on AKS" + +# Summary +Write-Host "" +Write-Host "==============================================" +Write-Success "Fleet Hub + AKS Member Setup Complete!" 
+Write-Host "==============================================" +Write-Host "" +Write-Host "Fleet Details:" +Write-Host " Resource Group: $ResourceGroup" +Write-Host " Fleet Hub: $FleetName" +Write-Host " AKS Member: $AksCluster" +Write-Host " Location: $Location" +Write-Host "" +Write-Host "Azure Portal Links:" +$SubscriptionId = az account show --query id -o tsv +Write-Host " Fleet Hub: https://portal.azure.com/#@/resource/subscriptions/$SubscriptionId/resourceGroups/$ResourceGroup/providers/Microsoft.ContainerService/fleets/$FleetName/overview" +Write-Host " AKS: https://portal.azure.com/#@/resource/subscriptions/$SubscriptionId/resourceGroups/$ResourceGroup/providers/Microsoft.ContainerService/managedClusters/$AksCluster/overview" +Write-Host "" +Write-Host "Next step: Run .\setup-arc-member.ps1 to add Arc-enabled on-prem cluster" +Write-Host "" + +# Export variables for next script +$envContent = @" +`$env:RESOURCE_GROUP = "$ResourceGroup" +`$env:LOCATION = "$Location" +`$env:FLEET_NAME = "$FleetName" +`$env:AKS_CLUSTER = "$AksCluster" +"@ +$envContent | Out-File -FilePath ".fleet-env.ps1" -Encoding UTF8 +Write-Log "Variables saved to .fleet-env.ps1 (dot-source it for next scripts: . 
.\.fleet-env.ps1)" diff --git a/documentdb-playground/arc-hybrid-setup-with-fleet/setup-fleet-hub.sh b/documentdb-playground/arc-hybrid-setup-with-fleet/setup-fleet-hub.sh new file mode 100755 index 00000000..cdecd5c8 --- /dev/null +++ b/documentdb-playground/arc-hybrid-setup-with-fleet/setup-fleet-hub.sh @@ -0,0 +1,149 @@ +#!/bin/bash +# Setup Azure Fleet Manager hub + AKS member cluster +# Creates Fleet hub and AKS cluster, then joins AKS to Fleet + +set -e + +# WSL fix: Use separate Azure config to avoid Windows/Linux CLI conflicts +export AZURE_CONFIG_DIR="${AZURE_CONFIG_DIR:-$HOME/azure-linux}" + +# Configuration (can be overridden via environment variables) +RESOURCE_GROUP="${RESOURCE_GROUP:-documentdb-fleet-rg}" +LOCATION="${LOCATION:-eastus}" +FLEET_NAME="${FLEET_NAME:-documentdb-fleet}" +AKS_CLUSTER="${AKS_CLUSTER:-documentdb-aks}" +NODE_COUNT="${NODE_COUNT:-2}" +NODE_SIZE="${NODE_SIZE:-Standard_D4s_v3}" + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log() { echo -e "${BLUE}[$(date +'%H:%M:%S')]${NC} $1"; } +success() { echo -e "${GREEN}[$(date +'%H:%M:%S')] ✅ $1${NC}"; } +warn() { echo -e "${YELLOW}[$(date +'%H:%M:%S')] ⚠️ $1${NC}"; } +error() { echo -e "${RED}[$(date +'%H:%M:%S')] ❌ $1${NC}"; exit 1; } + +# Help +if [[ "$1" == "-h" || "$1" == "--help" ]]; then + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Creates Azure Fleet Manager hub and AKS member cluster" + echo "" + echo "Environment variables:" + echo " RESOURCE_GROUP Azure resource group (default: documentdb-fleet-rg)" + echo " LOCATION Azure region (default: eastus)" + echo " FLEET_NAME Fleet hub name (default: documentdb-fleet)" + echo " AKS_CLUSTER AKS cluster name (default: documentdb-aks)" + echo " NODE_COUNT Number of nodes (default: 2)" + echo " NODE_SIZE VM size (default: Standard_D4s_v3)" + exit 0 +fi + +# Check prerequisites +log "Checking prerequisites..." 
+log "DEBUG: AZURE_CONFIG_DIR=$AZURE_CONFIG_DIR" +log "DEBUG: which az = $(which az)" +log "DEBUG: az version = $(az --version 2>&1 | head -1)" +command -v az &> /dev/null || error "Azure CLI not found" +command -v kubectl &> /dev/null || error "kubectl not found" +command -v helm &> /dev/null || error "Helm not found" + +# Check Azure login +log "DEBUG: Running az account show..." +az account show 2>&1 | head -5 +if ! az account show &> /dev/null; then + log "DEBUG: az account show failed" + error "Not logged into Azure. Run 'az login' first." +fi + +# # Install Fleet extension +# log "Checking Azure CLI Fleet extension..." +# az extension add --name fleet --upgrade --yes 2>/dev/null || true + +# SUBSCRIPTION=$(az account show --query name -o tsv) +# log "Using Azure subscription: $SUBSCRIPTION" + +# # Create resource group +# log "Creating resource group: $RESOURCE_GROUP in $LOCATION..." +# az group create --name "$RESOURCE_GROUP" --location "$LOCATION" --output none +# success "Resource group created" + +# Create Fleet hub +log "Creating Azure Fleet Manager hub: $FLEET_NAME..." +az fleet create \ + --resource-group "$RESOURCE_GROUP" \ + --name "$FLEET_NAME" \ + --location "$LOCATION" \ + --output none +success "Fleet hub created" + +# Create AKS cluster +log "Creating AKS cluster: $AKS_CLUSTER (this takes ~5-10 minutes)..." +az aks create \ + --resource-group "$RESOURCE_GROUP" \ + --name "$AKS_CLUSTER" \ + --node-count "$NODE_COUNT" \ + --node-vm-size "$NODE_SIZE" \ + --enable-managed-identity \ + --generate-ssh-keys \ + --tags purpose=documentdb environment=aks fleet="$FLEET_NAME" \ + --output none +success "AKS cluster created" + +# Join AKS to Fleet +log "Joining AKS cluster to Fleet..." 
+AKS_ID=$(az aks show -g "$RESOURCE_GROUP" -n "$AKS_CLUSTER" --query id -o tsv) +az fleet member create \ + --resource-group "$RESOURCE_GROUP" \ + --fleet-name "$FLEET_NAME" \ + --name "$AKS_CLUSTER" \ + --member-cluster-id "$AKS_ID" \ + --output none +success "AKS joined to Fleet" + +# Get AKS credentials +log "Getting AKS cluster credentials..." +az aks get-credentials --resource-group "$RESOURCE_GROUP" --name "$AKS_CLUSTER" --overwrite-existing + +# Verify connectivity +log "Verifying AKS cluster connectivity..." +kubectl cluster-info +kubectl get nodes + +# Install cert-manager on AKS +log "Installing cert-manager on AKS..." +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml +log "Waiting for cert-manager to be ready..." +kubectl wait --for=condition=Available deployment --all -n cert-manager --timeout=300s +success "cert-manager installed on AKS" + +# Summary +echo "" +echo "==============================================" +success "Fleet Hub + AKS Member Setup Complete!" 
+echo "==============================================" +echo "" +echo "Fleet Details:" +echo " Resource Group: $RESOURCE_GROUP" +echo " Fleet Hub: $FLEET_NAME" +echo " AKS Member: $AKS_CLUSTER" +echo " Location: $LOCATION" +echo "" +echo "Azure Portal Links:" +SUBSCRIPTION_ID=$(az account show --query id -o tsv) +echo " Fleet Hub: https://portal.azure.com/#@/resource/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.ContainerService/fleets/$FLEET_NAME/overview" +echo " AKS: https://portal.azure.com/#@/resource/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.ContainerService/managedClusters/$AKS_CLUSTER/overview" +echo "" +echo "Next step: Run ./setup-arc-member.sh to add Arc-enabled on-prem cluster" +echo "" + +# Export variables for next script +echo "export RESOURCE_GROUP=$RESOURCE_GROUP" > .fleet-env +echo "export LOCATION=$LOCATION" >> .fleet-env +echo "export FLEET_NAME=$FLEET_NAME" >> .fleet-env +echo "export AKS_CLUSTER=$AKS_CLUSTER" >> .fleet-env +log "Variables saved to .fleet-env (source it for next scripts)" diff --git a/documentdb-playground/arc-hybrid-setup-with-fleet/verify-portal.sh b/documentdb-playground/arc-hybrid-setup-with-fleet/verify-portal.sh new file mode 100755 index 00000000..ec3a9e4a --- /dev/null +++ b/documentdb-playground/arc-hybrid-setup-with-fleet/verify-portal.sh @@ -0,0 +1,123 @@ +#!/bin/bash +# Verify Fleet-based DocumentDB deployment in Azure Portal +# Shows Fleet hub, members, and DocumentDB status + +set -e + +# Load environment if available +[[ -f .fleet-env ]] && source .fleet-env + +# Configuration +RESOURCE_GROUP="${RESOURCE_GROUP:-documentdb-fleet-rg}" +FLEET_NAME="${FLEET_NAME:-documentdb-fleet}" +AKS_CLUSTER="${AKS_CLUSTER:-documentdb-aks}" +ARC_CLUSTER="${ARC_CLUSTER:-documentdb-onprem}" + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +NC='\033[0m' + +log() { echo -e "${BLUE}[$(date 
+'%H:%M:%S')]${NC} $1"; } +success() { echo -e "${GREEN}✅ $1${NC}"; } +warn() { echo -e "${YELLOW}⚠️ $1${NC}"; } +header() { echo -e "\n${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"; echo -e "${CYAN}$1${NC}"; echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"; } + +# Check prerequisites +command -v az &> /dev/null || { echo "Azure CLI not found"; exit 1; } + +# Check Azure login +if ! az account show &> /dev/null; then + echo "Not logged into Azure. Run 'az login' first." + exit 1 +fi + +SUBSCRIPTION_ID=$(az account show --query id -o tsv) +SUBSCRIPTION_NAME=$(az account show --query name -o tsv) + +header "Azure Subscription" +echo " Name: $SUBSCRIPTION_NAME" +echo " ID: $SUBSCRIPTION_ID" + +header "Azure Fleet Manager" +if az fleet show --resource-group "$RESOURCE_GROUP" --name "$FLEET_NAME" &>/dev/null; then + az fleet show --resource-group "$RESOURCE_GROUP" --name "$FLEET_NAME" \ + --query "{Name:name, State:provisioningState, Location:location}" \ + --output table + success "Fleet hub found" +else + warn "Fleet hub '$FLEET_NAME' not found in $RESOURCE_GROUP" +fi + +header "Fleet Members" +if az fleet member list --resource-group "$RESOURCE_GROUP" --fleet-name "$FLEET_NAME" &>/dev/null; then + az fleet member list --resource-group "$RESOURCE_GROUP" --fleet-name "$FLEET_NAME" \ + --query "[].{Name:name, State:provisioningState, ClusterId:clusterResourceId}" \ + --output table +else + warn "Could not list Fleet members" +fi + +header "AKS Cluster Status" +if az aks show --name "$AKS_CLUSTER" --resource-group "$RESOURCE_GROUP" &>/dev/null; then + az aks show --name "$AKS_CLUSTER" --resource-group "$RESOURCE_GROUP" \ + --query "{Name:name, State:provisioningState, K8sVersion:kubernetesVersion, NodeCount:agentPoolProfiles[0].count, Location:location}" \ + --output table + success "AKS cluster found" +else + warn "AKS cluster '$AKS_CLUSTER' not found in $RESOURCE_GROUP" +fi + +header "Arc-Enabled Cluster Status" +if az connectedk8s 
show --name "$ARC_CLUSTER" --resource-group "$RESOURCE_GROUP" &>/dev/null; then + az connectedk8s show --name "$ARC_CLUSTER" --resource-group "$RESOURCE_GROUP" \ + --query "{Name:name, Connectivity:connectivityStatus, K8sVersion:kubernetesVersion, AgentVersion:agentVersion, Location:location}" \ + --output table + success "Arc-enabled cluster found" +else + warn "Arc-enabled cluster '$ARC_CLUSTER' not found in $RESOURCE_GROUP" +fi + +header "DocumentDB Status on Each Cluster" +echo "" + +# Check AKS +if az aks show -g "$RESOURCE_GROUP" -n "$AKS_CLUSTER" &>/dev/null; then + log "=== $AKS_CLUSTER (AKS) ===" + az aks get-credentials -g "$RESOURCE_GROUP" -n "$AKS_CLUSTER" --overwrite-existing 2>/dev/null + echo "Operator:" + kubectl get pods -n documentdb-operator --no-headers 2>/dev/null || echo " (not installed)" + echo "Instances:" + kubectl get documentdb -A --no-headers 2>/dev/null || echo " (none)" + echo "" +fi + +# Check Arc +if kubectl config use-context "kind-$ARC_CLUSTER" &>/dev/null; then + log "=== $ARC_CLUSTER (Arc) ===" + echo "Operator:" + kubectl get pods -n documentdb-operator --no-headers 2>/dev/null || echo " (not installed)" + echo "Instances:" + kubectl get documentdb -A --no-headers 2>/dev/null || echo " (none)" + echo "" +fi + +header "Azure Portal Links" +echo "" +echo "Fleet Manager:" +echo " https://portal.azure.com/#@/resource/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.ContainerService/fleets/$FLEET_NAME/overview" +echo "" +echo "AKS Cluster:" +echo " https://portal.azure.com/#@/resource/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.ContainerService/managedClusters/$AKS_CLUSTER/overview" +echo "" +echo "Arc-Enabled Cluster:" +echo " https://portal.azure.com/#@/resource/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.Kubernetes/connectedClusters/$ARC_CLUSTER/overview" +echo "" +echo "All Kubernetes Clusters:" +echo " 
https://portal.azure.com/#view/Microsoft_Azure_HybridCompute/AzureArcCenterBlade/~/kubernetesServices" +echo "" + +success "Verification complete!" From fde54ca76b74460071f9762eac90320674224e97 Mon Sep 17 00:00:00 2001 From: Rayhan Hossain Date: Mon, 2 Mar 2026 11:04:59 -0800 Subject: [PATCH 4/4] Remove Arc Extension implementation folder for now Signed-off-by: Rayhan Hossain --- .../azure-arc/azure-arc-integration-plan.md | 8 +- operator/arc-extension/README.md | 340 ------------------ operator/arc-extension/extension.yaml | 139 ------- operator/arc-extension/test-arc-extension.sh | 325 ----------------- operator/arc-extension/values-arc.yaml | 112 ------ 5 files changed, 6 insertions(+), 918 deletions(-) delete mode 100644 operator/arc-extension/README.md delete mode 100644 operator/arc-extension/extension.yaml delete mode 100755 operator/arc-extension/test-arc-extension.sh delete mode 100644 operator/arc-extension/values-arc.yaml diff --git a/docs/designs/azure-arc/azure-arc-integration-plan.md b/docs/designs/azure-arc/azure-arc-integration-plan.md index 6f5b1b7a..ef5588f5 100644 --- a/docs/designs/azure-arc/azure-arc-integration-plan.md +++ b/docs/designs/azure-arc/azure-arc-integration-plan.md @@ -199,12 +199,14 @@ Create `extension.yaml` that tells Arc how to deploy the operator. **Files to create:** ``` operator/ -└── arc-extension/ +└── arc-extension/ # To be created in future PR ├── extension.yaml # K8s extension manifest ├── values-arc.yaml # Arc-specific Helm overrides └── README.md # Installation guide ``` +> **Status:** This folder will be created when extension registration begins. See [arc-hybrid-setup-with-fleet](../../../documentdb-playground/arc-hybrid-setup-with-fleet/) for current interim solution. 
+ **Deliverable:** Working extension manifest pointing to ghcr.io --- @@ -343,8 +345,10 @@ oci://ghcr.io/documentdb/documentdb-operator:0.1.1 ### Extension Manifest Example +The following is a reference format for the extension manifest (to be created in future PR): + ```yaml -# operator/arc-extension/extension.yaml +# operator/arc-extension/extension.yaml (planned) extensionType: Microsoft.DocumentDB.Operator version: 0.1.3 diff --git a/operator/arc-extension/README.md b/operator/arc-extension/README.md deleted file mode 100644 index 370beb7b..00000000 --- a/operator/arc-extension/README.md +++ /dev/null @@ -1,340 +0,0 @@ -# DocumentDB Kubernetes Operator - Azure Extension - -Deploy DocumentDB Kubernetes Operator on any Kubernetes cluster using Azure extensions. - -## Overview - -This extension allows you to: -- Install DocumentDB Operator on **AKS** clusters (Azure-native) -- Install DocumentDB Operator on **any Kubernetes cluster** via Azure Arc (on-premises, edge, multi-cloud) -- View and manage the extension in Azure Portal -- Monitor extension health and status from Azure -- Unified billing across all cluster types (Phase 2) - -### Supported Cluster Types - -| Cluster Type | `--cluster-type` | Arc Agent Required? 
| -|--------------|------------------|--------------------| -| AKS (Azure) | `managedClusters` | No | -| EKS (AWS) | `connectedClusters` | Yes | -| GKE (GCP) | `connectedClusters` | Yes | -| On-premises | `connectedClusters` | Yes | - -## Prerequisites - -### For AKS Clusters - -- Azure subscription -- AKS cluster (v1.26+) -- Azure CLI with `aks` and `k8s-extension` extensions - -```bash -az extension add --name aks-preview -az extension add --name k8s-extension -``` - -### For Non-AKS Clusters (Arc-enabled) - -- Azure subscription -- Kubernetes cluster (v1.26+) -- Azure CLI with `connectedk8s` and `k8s-extension` extensions -- `kubectl` configured to access your cluster - -```bash -az extension add --name connectedk8s -az extension add --name k8s-extension -``` - -## Installation - -### Option 1: AKS Clusters (No Arc Agent Needed) - -```bash -# Login to Azure -az login -az account set --subscription - -# Install extension directly on AKS -az k8s-extension create \ - --name documentdb-operator \ - --extension-type Microsoft.DocumentDB.Operator \ - --cluster-name my-aks-cluster \ - --resource-group my-rg \ - --cluster-type managedClusters \ - --release-train stable -``` - -Verify installation: -```bash -# Check extension status -az k8s-extension show \ - --name documentdb-operator \ - --cluster-name my-aks-cluster \ - --resource-group my-rg \ - --cluster-type managedClusters - -# Check pods -kubectl get pods -n documentdb-operator -kubectl get pods -n cnpg-system -``` - -### Option 2: Arc-enabled Clusters (EKS, GKE, On-premises) - -#### Step 1: Connect Your Cluster to Azure Arc (One-time) - -```bash -# Login to Azure -az login -az account set --subscription - -# Create resource group (if needed) -az group create --name my-arc-rg --location eastus - -# Connect cluster to Azure Arc -az connectedk8s connect \ - --name my-cluster \ - --resource-group my-arc-rg -``` - -Verify Arc agent is running: -```bash -kubectl get pods -n azure-arc -``` - -#### Step 2: Install 
DocumentDB Extension - -```bash -az k8s-extension create \ - --name documentdb-operator \ - --extension-type Microsoft.DocumentDB.Operator \ - --cluster-name my-cluster \ - --resource-group my-arc-rg \ - --cluster-type connectedClusters \ - --release-train stable -``` - -#### Step 3: Verify Installation - -```bash -# Check extension status -az k8s-extension show \ - --name documentdb-operator \ - --cluster-name my-cluster \ - --resource-group my-arc-rg \ - --cluster-type connectedClusters - -# Check pods in cluster -kubectl get pods -n documentdb-operator -kubectl get pods -n cnpg-system -``` - -## Configuration Options - -> **Note:** For all examples below, use `--cluster-type managedClusters` for AKS or `--cluster-type connectedClusters` for Arc-enabled clusters. - -### Basic Configuration - -```bash -az k8s-extension create \ - --name documentdb-operator \ - --extension-type Microsoft.DocumentDB.Operator \ - --cluster-name my-cluster \ - --resource-group my-rg \ - --cluster-type managedClusters \ # or connectedClusters - --configuration-settings documentDbVersion=0.1.3 \ - --configuration-settings replicaCount=1 -``` - -### Enable WAL Replica Feature - -```bash -az k8s-extension create \ - --name documentdb-operator \ - --extension-type Microsoft.DocumentDB.Operator \ - --cluster-name my-cluster \ - --resource-group my-rg \ - --cluster-type managedClusters \ # or connectedClusters - --configuration-settings walReplica=true -``` - -### Private Registry Authentication - -If using a private container registry: - -```bash -az k8s-extension create \ - --name documentdb-operator \ - --extension-type Microsoft.DocumentDB.Operator \ - --cluster-name my-cluster \ - --resource-group my-rg \ - --cluster-type managedClusters \ # or connectedClusters - --configuration-protected-settings registry.username= \ - --configuration-protected-settings registry.password= -``` - -## Managing the Extension - -### Check Extension Status - -```bash -# For AKS -az k8s-extension show \ - 
--name documentdb-operator \ - --cluster-name my-aks-cluster \ - --resource-group my-rg \ - --cluster-type managedClusters \ - --output table - -# For Arc-enabled clusters -az k8s-extension show \ - --name documentdb-operator \ - --cluster-name my-arc-cluster \ - --resource-group my-rg \ - --cluster-type connectedClusters \ - --output table -``` - -### Upgrade Extension - -```bash -az k8s-extension update \ - --name documentdb-operator \ - --cluster-name my-cluster \ - --resource-group my-rg \ - --cluster-type managedClusters \ # or connectedClusters - --version 0.1.4 -``` - -### Uninstall Extension - -```bash -az k8s-extension delete \ - --name documentdb-operator \ - --cluster-name my-cluster \ - --resource-group my-rg \ - --cluster-type managedClusters \ # or connectedClusters - --yes -``` - -### Query All Installations (Cross-Cluster) - -Use Azure Resource Graph to find all DocumentDB installations across your subscriptions: - -```bash -az graph query -q " - resources - | where type == 'microsoft.kubernetesconfiguration/extensions' - | where properties.extensionType == 'Microsoft.DocumentDB.Operator' - | extend clusterType = case( - id contains 'managedClusters', 'AKS', - id contains 'connectedClusters', 'Arc', - 'Unknown') - | project subscriptionId, resourceGroup, - clusterName=split(id,'/')[8], clusterType, - version=properties.version -" -``` - -## Deploying DocumentDB Instances - -After the operator is installed, deploy DocumentDB instances: - -```yaml -# documentdb-instance.yaml -apiVersion: v1 -kind: Namespace -metadata: - name: documentdb-ns ---- -apiVersion: v1 -kind: Secret -metadata: - name: documentdb-credentials - namespace: documentdb-ns -type: Opaque -stringData: - username: docdbadmin - password: YourSecurePassword123! 
---- -apiVersion: documentdb.io/preview -kind: DocumentDB -metadata: - name: my-documentdb - namespace: documentdb-ns -spec: - nodeCount: 1 - instancesPerNode: 1 - documentDBImage: ghcr.io/microsoft/documentdb/documentdb-local:16 - gatewayImage: ghcr.io/microsoft/documentdb/documentdb-local:16 - documentDbCredentialSecret: documentdb-credentials - resource: - storage: - pvcSize: 10Gi -``` - -Apply the configuration: -```bash -kubectl apply -f documentdb-instance.yaml -``` - -## Azure Portal - -Once installed, you can view and manage the extension in Azure Portal: - -### For AKS Clusters -1. Navigate to **Kubernetes services** -2. Select your AKS cluster -3. Go to **Settings** > **Extensions** -4. Find **documentdb-operator** - -### For Arc-enabled Clusters -1. Navigate to **Azure Arc** > **Kubernetes clusters** -2. Select your cluster -3. Go to **Extensions** -4. Find **documentdb-operator** - -## Troubleshooting - -### Extension Installation Fails - -```bash -# Check extension status (use appropriate --cluster-type) -az k8s-extension show --name documentdb-operator \ - --cluster-name my-cluster --resource-group my-rg \ - --cluster-type managedClusters # or connectedClusters - -# For Arc-enabled clusters: Check Arc agent logs -kubectl logs -n azure-arc -l app.kubernetes.io/name=clusterconnect-agent - -# Check operator logs -kubectl logs -n documentdb-operator -l app.kubernetes.io/name=documentdb-operator -``` - -### Pods Not Starting - -```bash -# Check pod status -kubectl get pods -n documentdb-operator -o wide - -# Describe pod for events -kubectl describe pod -n documentdb-operator - -# Check CNPG operator -kubectl get pods -n cnpg-system -``` - -### Connectivity Issues - -Ensure outbound connectivity to: -- `ghcr.io` (port 443) - Container images - -**Additional for Arc-enabled clusters:** -- `*.servicebus.windows.net` (port 443) -- `*.guestconfiguration.azure.com` (port 443) - -## Support - -- [DocumentDB Operator 
Documentation](https://documentdb.io/documentdb-kubernetes-operator/preview/) -- [AKS Cluster Extensions](https://learn.microsoft.com/en-us/azure/aks/cluster-extensions) -- [Azure Arc Documentation](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/) -- [GitHub Issues](https://github.com/microsoft/documentdb-kubernetes-operator/issues) diff --git a/operator/arc-extension/extension.yaml b/operator/arc-extension/extension.yaml deleted file mode 100644 index 6a266a7a..00000000 --- a/operator/arc-extension/extension.yaml +++ /dev/null @@ -1,139 +0,0 @@ -# Azure Extension Manifest for DocumentDB Kubernetes Operator -# -# This manifest tells Azure how to deploy the DocumentDB operator -# on Kubernetes clusters (AKS and Arc-enabled). -# -# Extension Type: Microsoft.DocumentDB.Operator -# Chart Source: OCI registry at ghcr.io/documentdb/documentdb-operator -# -# Supported cluster types: -# - managedClusters (AKS) - no Arc agent required -# - connectedClusters (Arc-enabled) - Arc agent required - -apiVersion: arc.azure.com/v1 -kind: ExtensionConfiguration -metadata: - name: documentdb-operator - namespace: documentdb-operator - -spec: - # Extension identity registered with Azure - extensionType: Microsoft.DocumentDB.Operator - - # Extension version (should match Helm chart version) - version: 0.1.3 - - # Release train configuration - releaseTrain: stable - autoUpgradeMinorVersion: true - - # Helm chart configuration - helm: - # OCI registry location (existing ghcr.io - no repackaging needed) - registryUrl: oci://ghcr.io/documentdb - chartName: documentdb-operator - chartVersion: "0.1.3" - - # Deployment configuration - releaseName: documentdb-operator - releaseNamespace: documentdb-operator - createNamespace: true - - # Default timeout for Helm operations - timeout: 10m - - # Values file for Arc-specific overrides - valuesFile: values-arc.yaml - - # User-configurable settings exposed via az k8s-extension create - # Example: az k8s-extension create ... 
--configuration-settings documentDbVersion=0.1.3 - configurationSettings: - - name: documentDbVersion - description: "DocumentDB operator version" - type: string - defaultValue: "0.1.3" - required: false - - - name: replicaCount - description: "Number of operator replicas" - type: integer - defaultValue: "1" - required: false - - - name: walReplica - description: "Enable WAL replica feature" - type: boolean - defaultValue: "false" - required: false - - # Protected settings (secrets) - not logged or displayed - configurationProtectedSettings: - - name: registry.username - description: "Container registry username (if private)" - type: string - required: false - - - name: registry.password - description: "Container registry password (if private)" - type: string - required: false - - # Health monitoring configuration - healthChecks: - # Primary operator deployment - - kind: Deployment - name: documentdb-operator - namespace: documentdb-operator - healthyThreshold: 1 - unhealthyThreshold: 3 - - # CNPG operator (dependency) - - kind: Deployment - name: cloudnative-pg - namespace: cnpg-system - healthyThreshold: 1 - unhealthyThreshold: 3 - - # Scope: cluster-wide (not namespace-scoped) - scope: - cluster: - releaseNamespace: documentdb-operator - - # Identity configuration for Azure resources access - identity: - type: SystemAssigned - - # Resource requirements for extension pods - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 500m - memory: 512Mi - ---- -# Release train definitions -apiVersion: arc.azure.com/v1 -kind: ReleaseTrainConfiguration -metadata: - name: documentdb-operator-release-trains - -spec: - releaseTrains: - # Stable release train - production ready - - name: stable - enabled: true - versions: - - "0.1.3" - - "0.1.2" - - "0.1.1" - defaultVersion: "0.1.3" - - # Preview release train - early access features - - name: preview - enabled: true - versions: - - "0.2.0-preview" - - "0.1.3" - defaultVersion: "0.1.3" diff --git 
#!/bin/bash
#
# Azure Extension Test Script for DocumentDB Operator
#
# This script helps test the extension locally before Azure registration.
# It uses Kind clusters to simulate Arc-enabled clusters (connectedClusters).
#
# For AKS (managedClusters) testing, use an actual AKS cluster and run:
#   az k8s-extension create --cluster-type managedClusters ...
#
# This script can:
#   1. Create a Kind cluster for testing
#   2. Connect the cluster to Azure Arc
#   3. Simulate extension deployment (before registration)
#   4. Test full Arc extension flow (after registration)
#
# Usage:
#   ./test-arc-extension.sh --setup-kind        # Create Kind cluster only
#   ./test-arc-extension.sh --connect-arc       # Connect to Azure Arc
#   ./test-arc-extension.sh --simulate-install  # Simulate extension install (no Arc registration)
#   ./test-arc-extension.sh --install-extension # Install via az k8s-extension (requires registration)
#   ./test-arc-extension.sh --cleanup           # Delete Kind cluster

# Fail fast: abort on command errors, unset variables, and failures anywhere
# in a pipeline. (Original used bare `set -e`, which misses the latter two.)
set -euo pipefail

# Configuration — every knob is overridable via environment variables.
CLUSTER_NAME="${ARC_CLUSTER_NAME:-arc-test-cluster}"
RESOURCE_GROUP="${ARC_RESOURCE_GROUP:-arc-test-rg}"
LOCATION="${ARC_LOCATION:-eastus}"
CHART_VERSION="${CHART_VERSION:-0.1.3}"
GITHUB_ORG="${GITHUB_ORG:-documentdb}"

# ANSI colors for log output.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'

# Logging helpers. Arguments are quoted ("$*") so messages containing globs
# or multiple spaces survive intact; `error` writes to stderr and exits.
log()     { echo -e "${BLUE}[$(date +'%H:%M:%S')]${NC} $*"; }
success() { echo -e "${GREEN}[$(date +'%H:%M:%S')] ✅ $*${NC}"; }
warn()    { echo -e "${YELLOW}[$(date +'%H:%M:%S')] ⚠️  $*${NC}" >&2; }
error()   { echo -e "${RED}[$(date +'%H:%M:%S')] ❌ $*${NC}" >&2; exit 1; }

# Verify the CLI tools every mode depends on are installed.
check_prerequisites() {
  log "Checking prerequisites..."

  command -v kubectl >/dev/null 2>&1 || error "kubectl not found"
  command -v helm >/dev/null 2>&1 || error "helm not found"
  command -v az >/dev/null 2>&1 || error "Azure CLI not found"

  success "Prerequisites met"
}

# Create a local Kind cluster to stand in for a connectedCluster.
setup_kind() {
  log "Setting up Kind cluster: $CLUSTER_NAME"

  command -v kind >/dev/null 2>&1 || error "kind not found. Install: https://kind.sigs.k8s.io/"

  if kind get clusters | grep -q "^${CLUSTER_NAME}$"; then
    warn "Cluster $CLUSTER_NAME already exists"
  else
    # NOTE(review): the original cluster-config here-doc was truncated in the
    # source under review; a minimal single-node config is reconstructed here.
    # TODO confirm against the original script (node count, port mappings).
    cat <<EOF | kind create cluster --name "$CLUSTER_NAME" --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
EOF
  fi

  success "Kind cluster ready: $CLUSTER_NAME"
}

# Register the current cluster with Azure Arc as a connectedCluster.
connect_arc() {
  log "Connecting cluster to Azure Arc..."

  # NOTE(review): the start of this function was truncated in the source under
  # review; the login check is reconstructed from the surviving error message.
  az account show >/dev/null 2>&1 || error "Not logged into Azure. Run: az login"

  # Install/update the Azure CLI extensions Arc onboarding requires.
  # Failures are tolerated: the extension may already be present.
  log "Installing Azure CLI extensions..."
  az extension add --name connectedk8s --upgrade --yes 2>/dev/null || true
  az extension add --name k8s-extension --upgrade --yes 2>/dev/null || true

  # Create resource group (idempotent: ignore "already exists").
  log "Creating resource group: $RESOURCE_GROUP"
  az group create --name "$RESOURCE_GROUP" --location "$LOCATION" --output none 2>/dev/null || true

  # Connect to Arc.
  log "Connecting to Azure Arc (this may take a few minutes)..."
  az connectedk8s connect \
    --name "$CLUSTER_NAME" \
    --resource-group "$RESOURCE_GROUP" \
    --location "$LOCATION"

  # Verify the Arc agent came up before declaring success.
  log "Verifying Arc agent..."
  kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=clusterconnect-agent -n azure-arc --timeout=300s

  success "Cluster connected to Azure Arc"
  log "View in portal: https://portal.azure.com/#view/Microsoft_Azure_HybridCompute/AzureArcCenterBlade/~/overview"
}

# Simulate extension install (before Azure registration) by running the same
# Helm install that the Arc extension machinery would perform.
simulate_install() {
  log "Simulating Arc extension install (direct Helm)..."
  log "This bypasses Arc and installs directly - use for local testing"

  # Check for GitHub credentials. ${VAR:-} keeps `set -u` from aborting when
  # the variables are simply unset (original bare $GITHUB_TOKEN would trip -u).
  if [ -z "${GITHUB_TOKEN:-}" ] || [ -z "${GITHUB_USERNAME:-}" ]; then
    warn "GITHUB_TOKEN and GITHUB_USERNAME not set"
    warn "Set them if ghcr.io requires authentication"
  else
    log "Authenticating with ghcr.io..."
    # --password-stdin keeps the token out of argv (and `ps` output).
    echo "$GITHUB_TOKEN" | helm registry login ghcr.io --username "$GITHUB_USERNAME" --password-stdin
  fi

  # Install using Helm (simulates what Arc does).
  log "Installing DocumentDB operator via Helm..."
  helm upgrade --install documentdb-operator \
    oci://ghcr.io/${GITHUB_ORG}/documentdb-operator \
    --version "$CHART_VERSION" \
    --namespace documentdb-operator \
    --create-namespace \
    --values "$(dirname "$0")/values-arc.yaml" \
    --wait \
    --timeout 10m

  # Verify both operators the chart deploys are actually running.
  log "Verifying installation..."
  kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=documentdb-operator -n documentdb-operator --timeout=300s
  kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=cloudnative-pg -n cnpg-system --timeout=300s

  success "Simulated extension install complete"

  echo ""
  log "Installed components:"
  kubectl get pods -n documentdb-operator
  kubectl get pods -n cnpg-system
}

# Install via the real Arc extension path (requires the extension type to be
# registered with Azure first).
install_extension() {
  log "Installing DocumentDB operator via Azure Arc extension..."

  # Check Arc connection before attempting the extension create.
  az connectedk8s show --name "$CLUSTER_NAME" --resource-group "$RESOURCE_GROUP" >/dev/null 2>&1 \
    || error "Cluster not connected to Arc. Run: $0 --connect-arc"

  # Install extension.
  az k8s-extension create \
    --name documentdb-operator \
    --extension-type Microsoft.DocumentDB.Operator \
    --cluster-name "$CLUSTER_NAME" \
    --resource-group "$RESOURCE_GROUP" \
    --cluster-type connectedClusters \
    --release-train stable \
    --configuration-settings documentDbVersion="$CHART_VERSION"

  # Verify.
  log "Verifying extension..."
  az k8s-extension show \
    --name documentdb-operator \
    --cluster-name "$CLUSTER_NAME" \
    --resource-group "$RESOURCE_GROUP" \
    --cluster-type connectedClusters \
    --output table

  success "Extension installed via Azure Arc"
}

# Show extension status on both the Azure side and the Kubernetes side.
show_status() {
  log "Extension status:"

  echo ""
  echo "=== Azure Arc Extension ==="
  az k8s-extension show \
    --name documentdb-operator \
    --cluster-name "$CLUSTER_NAME" \
    --resource-group "$RESOURCE_GROUP" \
    --cluster-type connectedClusters \
    --output table 2>/dev/null || warn "Extension not found in Azure"

  echo ""
  echo "=== Kubernetes Pods ==="
  echo "DocumentDB Operator:"
  kubectl get pods -n documentdb-operator 2>/dev/null || warn "Namespace not found"
  echo ""
  echo "CNPG Operator:"
  kubectl get pods -n cnpg-system 2>/dev/null || warn "Namespace not found"
  echo ""
  echo "Azure Arc Agent:"
  kubectl get pods -n azure-arc 2>/dev/null || warn "Arc agent not installed"
}

# Uninstall the extension, whichever way it was installed (Arc or direct Helm).
uninstall_extension() {
  log "Uninstalling extension..."

  # Try Arc uninstall first.
  az k8s-extension delete \
    --name documentdb-operator \
    --cluster-name "$CLUSTER_NAME" \
    --resource-group "$RESOURCE_GROUP" \
    --cluster-type connectedClusters \
    --yes 2>/dev/null || warn "Arc extension not found"

  # Helm uninstall (if simulated install).
  helm uninstall documentdb-operator -n documentdb-operator 2>/dev/null || true

  # Cleanup namespaces.
  kubectl delete namespace documentdb-operator --ignore-not-found
  kubectl delete namespace cnpg-system --ignore-not-found

  success "Extension uninstalled"
}

# Tear down everything: Arc registration, Kind cluster, optionally the RG.
cleanup() {
  log "Cleaning up..."

  # Disconnect from Arc.
  az connectedk8s delete \
    --name "$CLUSTER_NAME" \
    --resource-group "$RESOURCE_GROUP" \
    --yes 2>/dev/null || warn "Cluster not connected to Arc"

  # Delete Kind cluster.
  kind delete cluster --name "$CLUSTER_NAME" 2>/dev/null || warn "Kind cluster not found"

  # Optionally delete resource group (interactive confirmation).
  read -p "Delete resource group $RESOURCE_GROUP? (y/N) " -n 1 -r
  echo
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    az group delete --name "$RESOURCE_GROUP" --yes --no-wait
    log "Resource group deletion initiated"
  fi

  success "Cleanup complete"
}

# Print usage.
# NOTE(review): the original usage here-doc and argument dispatch were cut off
# in the source under review; both are reconstructed from the header comments
# and the functions above — TODO confirm flag names against the original.
usage() {
  cat <<EOF
Usage: $0 [OPTION]

Options:
  --setup-kind         Create Kind cluster only
  --connect-arc        Connect the cluster to Azure Arc
  --simulate-install   Simulate extension install (direct Helm, no Arc registration)
  --install-extension  Install via az k8s-extension (requires registration)
  --status             Show extension and pod status
  --uninstall          Uninstall the extension
  --cleanup            Delete Kind cluster and Arc connection

Environment overrides:
  ARC_CLUSTER_NAME, ARC_RESOURCE_GROUP, ARC_LOCATION, CHART_VERSION, GITHUB_ORG
EOF
}

# Entry point: dispatch on the single mode flag.
main() {
  case "${1:-}" in
    --setup-kind)        check_prerequisites; setup_kind ;;
    --connect-arc)       check_prerequisites; connect_arc ;;
    --simulate-install)  check_prerequisites; simulate_install ;;
    --install-extension) check_prerequisites; install_extension ;;
    --status)            show_status ;;
    --uninstall)         uninstall_extension ;;
    --cleanup)           cleanup ;;
    *)                   usage; exit 1 ;;
  esac
}

main "$@"